@@ -1,50 +1,48 @@
"""utilities to assist in working with pygit2"""

from mercurial.node import bin, sha1nodeconstants

-from mercurial import pycompat
-
pygit2_module = None


def get_pygit2():
    global pygit2_module
    if pygit2_module is None:
        try:
            import pygit2 as pygit2_module

            pygit2_module.InvalidSpecError
        except (ImportError, AttributeError):
            pass
    return pygit2_module


def pygit2_version():
    mod = get_pygit2()
    v = "N/A"

    if mod:
        try:
            v = mod.__version__
        except AttributeError:
            pass

    return b"(pygit2 %s)" % v.encode("utf-8")


def togitnode(n):
    """Wrapper to convert a Mercurial binary node to a unicode hexlified node.

    pygit2 and sqlite both need nodes as strings, not bytes.
    """
    assert len(n) == 20
    return n.hex()


def fromgitnode(n):
    """Opposite of togitnode."""
    assert len(n) == 40
    return bin(n)


nullgit = togitnode(sha1nodeconstants.nullid)
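
togitnode() and fromgitnode() are the whole bridge between Mercurial's 20-byte binary nodes and the 40-character hex strings that pygit2 and sqlite expect. A minimal round-trip sketch (illustrative only, with a made-up node value, not part of the diff above):

    node = bytes.fromhex('aa' * 20)  # hypothetical 20-byte Mercurial node
    s = togitnode(node)              # 'aaaa...aa', a 40-character str
    assert len(s) == 40
    assert fromgitnode(s) == node    # bin() undoes .hex()
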
@@ -1,880 +1,880 @@
# branchmap.py - logic to compute, maintain and store the branchmap for a local repo
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.


import struct

from .node import (
    bin,
    hex,
    nullrev,
)
from . import (
    encoding,
    error,
    obsolete,
    pycompat,
    scmutil,
    util,
)
from .utils import (
    repoviewutil,
    stringutil,
)

if pycompat.TYPE_CHECKING:
    from typing import (
        Any,
        Callable,
        Dict,
        Iterable,
        List,
        Optional,
        Set,
        Tuple,
        Union,
    )
    from . import localrepo

    assert any(
        (
            Any,
            Callable,
            Dict,
            Iterable,
            List,
            Optional,
            Set,
            Tuple,
            Union,
            localrepo,
        )
    )

subsettable = repoviewutil.subsettable

calcsize = struct.calcsize
pack_into = struct.pack_into
unpack_from = struct.unpack_from


class BranchMapCache:
    """mapping of filtered views of repo with their branchcache"""

    def __init__(self):
        self._per_filter = {}

    def __getitem__(self, repo):
        self.updatecache(repo)
        return self._per_filter[repo.filtername]

    def updatecache(self, repo):
        """Update the cache for the given filtered view on a repository"""
        # This can trigger updates for the caches for subsets of the filtered
        # view, e.g. when there is no cache for this filtered view or the cache
        # is stale.

        cl = repo.changelog
        filtername = repo.filtername
        bcache = self._per_filter.get(filtername)
        if bcache is None or not bcache.validfor(repo):
            # cache object missing or cache object stale? Read from disk
            bcache = branchcache.fromfile(repo)

        revs = []
        if bcache is None:
            # no (fresh) cache available anymore, perhaps we can re-use
            # the cache for a subset, then extend that to add info on missing
            # revisions.
            subsetname = subsettable.get(filtername)
            if subsetname is not None:
                subset = repo.filtered(subsetname)
                bcache = self[subset].copy()
                extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
                revs.extend(r for r in extrarevs if r <= bcache.tiprev)
            else:
                # nothing to fall back on, start empty.
                bcache = branchcache(repo)

        revs.extend(cl.revs(start=bcache.tiprev + 1))
        if revs:
            bcache.update(repo, revs)

        assert bcache.validfor(repo), filtername
        self._per_filter[repo.filtername] = bcache

    def replace(self, repo, remotebranchmap):
        """Replace the branchmap cache for a repo with a branch mapping.

        This is likely only called during clone with a branch map from a
        remote.

        """
        cl = repo.changelog
        clrev = cl.rev
        clbranchinfo = cl.branchinfo
        rbheads = []
        closed = set()
        for bheads in remotebranchmap.values():
            rbheads += bheads
            for h in bheads:
                r = clrev(h)
                b, c = clbranchinfo(r)
                if c:
                    closed.add(h)

        if rbheads:
            rtiprev = max((int(clrev(node)) for node in rbheads))
            cache = branchcache(
                repo,
                remotebranchmap,
                repo[rtiprev].node(),
                rtiprev,
                closednodes=closed,
            )

            # Try to stick it as low as possible
            # filters above 'served' are unlikely to be fetched from a clone
            for candidate in (b'base', b'immutable', b'served'):
                rview = repo.filtered(candidate)
                if cache.validfor(rview):
                    self._per_filter[candidate] = cache
                    cache.write(rview)
                    return

    def clear(self):
        self._per_filter.clear()

    def write_delayed(self, repo):
        unfi = repo.unfiltered()
        for filtername, cache in self._per_filter.items():
            if cache._delayed:
                repo = unfi.filtered(filtername)
                cache.write(repo)
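

# Illustrative usage sketch, not part of the original file: localrepo keeps a
# single BranchMapCache per repository and indexes it with a (possibly
# filtered) repo view; __getitem__ warms the per-filter entry on demand.
#
#     caches = BranchMapCache()
#     bmap = caches[repo.filtered(b'served')]  # runs updatecache() if stale
#     heads = bmap.branchheads(b'default')     # open head nodes only
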

def _unknownnode(node):
    """raises ValueError when branchcache found a node which does not exist"""
    raise ValueError('node %s does not exist' % node.hex())


def _branchcachedesc(repo):
    if repo.filtername is not None:
        return b'branch cache (%s)' % repo.filtername
    else:
        return b'branch cache'


class branchcache:
    """A dict-like object that holds the branch heads cache.

    This cache is used to avoid costly computations to determine all the
    branch heads of a repo.

    The cache is serialized on disk in the following format:

    <tip hex node> <tip rev number> [optional filtered repo hex hash]
    <branch head hex node> <open/closed state> <branch name>
    <branch head hex node> <open/closed state> <branch name>
    ...

    The first line is used to check if the cache is still valid. If the
    branch cache is for a filtered repo view, an optional third hash is
    included that hashes the hashes of all filtered and obsolete revisions.

    The open/closed state is represented by a single letter 'o' or 'c'.
    This field can be used to avoid changelog reads when determining if a
    branch head closes a branch or not.
    """

    def __init__(
        self,
        repo,
        entries=(),
        tipnode=None,
        tiprev=nullrev,
        filteredhash=None,
        closednodes=None,
        hasnode=None,
    ):
        # type: (localrepo.localrepository, Union[Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]], bytes, int, Optional[bytes], Optional[Set[bytes]], Optional[Callable[[bytes], bool]]) -> None
        """hasnode is a function which can be used to verify whether changelog
        has a given node or not. If it's not provided, we assume that every node
        we have exists in changelog"""
        self._repo = repo
        self._delayed = False
        if tipnode is None:
            self.tipnode = repo.nullid
        else:
            self.tipnode = tipnode
        self.tiprev = tiprev
        self.filteredhash = filteredhash
        # closednodes is a set of nodes that close their branch. If the branch
        # cache has been updated, it may contain nodes that are no longer
        # heads.
        if closednodes is None:
            self._closednodes = set()
        else:
            self._closednodes = closednodes
        self._entries = dict(entries)
        # whether closed nodes are verified or not
        self._closedverified = False
        # branches for which nodes are verified
        self._verifiedbranches = set()
        self._hasnode = hasnode
        if self._hasnode is None:
            self._hasnode = lambda x: True

    def _verifyclosed(self):
        """verify the closed nodes we have"""
        if self._closedverified:
            return
        for node in self._closednodes:
            if not self._hasnode(node):
                _unknownnode(node)

        self._closedverified = True

    def _verifybranch(self, branch):
        """verify head nodes for the given branch."""
        if branch not in self._entries or branch in self._verifiedbranches:
            return
        for n in self._entries[branch]:
            if not self._hasnode(n):
                _unknownnode(n)

        self._verifiedbranches.add(branch)

    def _verifyall(self):
        """verifies nodes of all the branches"""
        needverification = set(self._entries.keys()) - self._verifiedbranches
        for b in needverification:
            self._verifybranch(b)

    def __iter__(self):
        return iter(self._entries)

    def __setitem__(self, key, value):
        self._entries[key] = value

    def __getitem__(self, key):
        self._verifybranch(key)
        return self._entries[key]

    def __contains__(self, key):
        self._verifybranch(key)
        return key in self._entries

    def iteritems(self):
        for k, v in self._entries.items():
            self._verifybranch(k)
            yield k, v

    items = iteritems

    def hasbranch(self, label):
        """checks whether a branch of this name exists or not"""
        self._verifybranch(label)
        return label in self._entries

    @classmethod
    def fromfile(cls, repo):
        f = None
        try:
            f = repo.cachevfs(cls._filename(repo))
            lineiter = iter(f)
            cachekey = next(lineiter).rstrip(b'\n').split(b" ", 2)
            last, lrev = cachekey[:2]
            last, lrev = bin(last), int(lrev)
            filteredhash = None
            hasnode = repo.changelog.hasnode
            if len(cachekey) > 2:
                filteredhash = bin(cachekey[2])
            bcache = cls(
                repo,
                tipnode=last,
                tiprev=lrev,
                filteredhash=filteredhash,
                hasnode=hasnode,
            )
            if not bcache.validfor(repo):
                # invalidate the cache
                raise ValueError('tip differs')
            bcache.load(repo, lineiter)
        except (IOError, OSError):
            return None

        except Exception as inst:
            if repo.ui.debugflag:
                msg = b'invalid %s: %s\n'
                repo.ui.debug(
                    msg
                    % (
                        _branchcachedesc(repo),
                        stringutil.forcebytestr(inst),
                    )
                )
            bcache = None

        finally:
            if f:
                f.close()

        return bcache

    def load(self, repo, lineiter):
        """fully loads the branchcache by reading from the file using the line
        iterator passed"""
        for line in lineiter:
            line = line.rstrip(b'\n')
            if not line:
                continue
            node, state, label = line.split(b" ", 2)
            if state not in b'oc':
                raise ValueError('invalid branch state')
            label = encoding.tolocal(label.strip())
            node = bin(node)
            self._entries.setdefault(label, []).append(node)
            if state == b'c':
                self._closednodes.add(node)

    @staticmethod
    def _filename(repo):
        """name of a branchcache file for a given repo or repoview"""
        filename = b"branch2"
        if repo.filtername:
            filename = b'%s-%s' % (filename, repo.filtername)
        return filename

    def validfor(self, repo):
        """check that cache contents are valid for (a subset of) this repo

        - False when the order of changesets changed or if we detect a strip.
        - True when cache is up-to-date for the current repo or its subset."""
        try:
            node = repo.changelog.node(self.tiprev)
        except IndexError:
            # changesets were stripped and now we don't even have enough to
            # find tiprev
            return False
        if self.tipnode != node:
            # tiprev doesn't correspond to tipnode: repo was stripped, or this
            # repo has a different order of changesets
            return False
        tiphash = scmutil.filteredhash(repo, self.tiprev, needobsolete=True)
        # hashes don't match if this repo view has a different set of filtered
        # revisions (e.g. due to phase changes) or obsolete revisions (e.g.
        # history was rewritten)
        return self.filteredhash == tiphash

    def _branchtip(self, heads):
        """Return tuple with last open head in heads and false,
        otherwise return last closed head and true."""
        tip = heads[-1]
        closed = True
        for h in reversed(heads):
            if h not in self._closednodes:
                tip = h
                closed = False
                break
        return tip, closed

    def branchtip(self, branch):
        """Return the tipmost open head on branch head, otherwise return the
        tipmost closed head on branch.
        Raise KeyError for unknown branch."""
        return self._branchtip(self[branch])[0]

    def iteropen(self, nodes):
        return (n for n in nodes if n not in self._closednodes)

    def branchheads(self, branch, closed=False):
        self._verifybranch(branch)
        heads = self._entries[branch]
        if not closed:
            heads = list(self.iteropen(heads))
        return heads

    def iterbranches(self):
        for bn, heads in self.items():
            yield (bn, heads) + self._branchtip(heads)

    def iterheads(self):
        """returns all the heads"""
        self._verifyall()
        return self._entries.values()

    def copy(self):
        """return a deep copy of the branchcache object"""
        return type(self)(
            self._repo,
            self._entries,
            self.tipnode,
            self.tiprev,
            self.filteredhash,
            self._closednodes,
        )

    def write(self, repo):
        tr = repo.currenttransaction()
        if not getattr(tr, 'finalized', True):
            # Avoid premature writing.
            #
            # (The cache warming setup by localrepo will update the file later.)
            self._delayed = True
            return
        try:
            filename = self._filename(repo)
            with repo.cachevfs(filename, b"w", atomictemp=True) as f:
                cachekey = [hex(self.tipnode), b'%d' % self.tiprev]
                if self.filteredhash is not None:
                    cachekey.append(hex(self.filteredhash))
                f.write(b" ".join(cachekey) + b'\n')
                nodecount = 0
                for label, nodes in sorted(self._entries.items()):
                    label = encoding.fromlocal(label)
                    for node in nodes:
                        nodecount += 1
                        if node in self._closednodes:
                            state = b'c'
                        else:
                            state = b'o'
                        f.write(b"%s %s %s\n" % (hex(node), state, label))
                repo.ui.log(
                    b'branchcache',
                    b'wrote %s with %d labels and %d nodes\n',
                    _branchcachedesc(repo),
                    len(self._entries),
                    nodecount,
                )
            self._delayed = False
        except (IOError, OSError, error.Abort) as inst:
            # Abort may be raised by a read-only opener, so log and continue
            repo.ui.debug(
                b"couldn't write branch cache: %s\n"
                % stringutil.forcebytestr(inst)
            )

    def update(self, repo, revgen):
        """Given a branchhead cache, self, that may have extra nodes or be
        missing heads, and a generator of revisions that is strictly a
        superset of the missing heads, this function updates self to be
        correct.
        """
        starttime = util.timer()
        cl = repo.changelog
        # collect new branch entries
        newbranches = {}
        getbranchinfo = repo.revbranchcache().branchinfo
        for r in revgen:
            branch, closesbranch = getbranchinfo(r)
            newbranches.setdefault(branch, []).append(r)
            if closesbranch:
                self._closednodes.add(cl.node(r))

        # new tip revision which we found after iterating items from new
        # branches
        ntiprev = self.tiprev

        # Delay fetching the topological heads until they are needed.
        # A repository without non-continuous branches can skip this part.
        topoheads = None

        # If a changeset is visible, its parents must be visible too, so
        # use the faster unfiltered parent accessor.
        parentrevs = repo.unfiltered().changelog.parentrevs

        # Faster than using ctx.obsolete()
        obsrevs = obsolete.getrevs(repo, b'obsolete')

        for branch, newheadrevs in newbranches.items():
            # For every branch, compute the new branchheads.
            # A branchhead is a revision such that no descendant is on
            # the same branch.
            #
            # The branchheads are computed iteratively in revision order.
            # This ensures topological order, i.e. parents are processed
            # before their children. Ancestors are inclusive here, i.e.
            # any revision is an ancestor of itself.
            #
            # Core observations:
            # - The current revision is always a branchhead for the
            #   repository up to that point.
            # - It is the first revision of the branch if and only if
            #   there was no branchhead before. In that case, it is the
            #   only branchhead as there are no possible ancestors on
            #   the same branch.
            # - If a parent is on the same branch, a branchhead can
            #   only be an ancestor of that parent, if it is parent
            #   itself. Otherwise it would have been removed as ancestor
            #   of that parent before.
            # - Therefore, if all parents are on the same branch, they
            #   can just be removed from the branchhead set.
            # - If one parent is on the same branch and the other is not
            #   and there was exactly one branchhead known, the existing
            #   branchhead can only be an ancestor if it is the parent.
            #   Otherwise it would have been removed as ancestor of
            #   the parent before. The other parent therefore can't have
            #   a branchhead as ancestor.
            # - In all other cases, the parents on different branches
            #   could have a branchhead as ancestor. Those parents are
            #   kept in the "uncertain" set. If all branchheads are also
            #   topological heads, they can't have descendants and further
            #   checks can be skipped. Otherwise, the ancestors of the
            #   "uncertain" set are removed from branchheads.
            #   This computation is heavy and avoided if at all possible.
            bheads = self._entries.get(branch, [])
            bheadset = {cl.rev(node) for node in bheads}
            uncertain = set()
            for newrev in sorted(newheadrevs):
                if newrev in obsrevs:
                    # We ignore obsolete changesets as they shouldn't be
                    # considered heads.
                    continue

                if not bheadset:
                    bheadset.add(newrev)
                    continue

                parents = [p for p in parentrevs(newrev) if p != nullrev]
                samebranch = set()
                otherbranch = set()
                obsparents = set()
                for p in parents:
                    if p in obsrevs:
                        # We ignored this obsolete changeset earlier, but now
                        # that it has non-ignored children, we need to make
                        # sure their ancestors are not considered heads. To
                        # achieve that, we will simply treat this obsolete
                        # changeset as a parent from other branch.
                        obsparents.add(p)
                    elif p in bheadset or getbranchinfo(p)[0] == branch:
                        samebranch.add(p)
                    else:
                        otherbranch.add(p)
                if not (len(bheadset) == len(samebranch) == 1):
                    uncertain.update(otherbranch)
                    uncertain.update(obsparents)
                bheadset.difference_update(samebranch)
                bheadset.add(newrev)

            if uncertain:
                if topoheads is None:
                    topoheads = set(cl.headrevs())
                if bheadset - topoheads:
                    floorrev = min(bheadset)
                    if floorrev <= max(uncertain):
                        ancestors = set(cl.ancestors(uncertain, floorrev))
                        bheadset -= ancestors
            if bheadset:
                self[branch] = [cl.node(rev) for rev in sorted(bheadset)]
                tiprev = max(newheadrevs)
                if tiprev > ntiprev:
                    ntiprev = tiprev

        if ntiprev > self.tiprev:
            self.tiprev = ntiprev
            self.tipnode = cl.node(ntiprev)

        if not self.validfor(repo):
            # old cache key is now invalid for the repo, but we've just updated
            # the cache and we assume it's valid, so let's make the cache key
            # valid as well by recomputing it from the cached data
            self.tipnode = repo.nullid
            self.tiprev = nullrev
            for heads in self.iterheads():
                if not heads:
                    # all revisions on a branch are obsolete
                    continue
                # note: tiprev is not necessarily the tip revision of repo,
                # because the tip could be obsolete (i.e. not a head)
                tiprev = max(cl.rev(node) for node in heads)
                if tiprev > self.tiprev:
                    self.tipnode = cl.node(tiprev)
                    self.tiprev = tiprev
            self.filteredhash = scmutil.filteredhash(
                repo, self.tiprev, needobsolete=True
            )

        duration = util.timer() - starttime
        repo.ui.log(
            b'branchcache',
            b'updated %s in %.4f seconds\n',
            _branchcachedesc(repo),
            duration,
        )

        self.write(repo)


class remotebranchcache(branchcache):
    """Branchmap info for a remote connection, should not write locally"""

    def write(self, repo):
        pass


# Revision branch info cache

_rbcversion = b'-v1'
_rbcnames = b'rbc-names' + _rbcversion
_rbcrevs = b'rbc-revs' + _rbcversion
# [4 byte hash prefix][4 byte branch name index with the top bit indicating close]
_rbcrecfmt = b'>4sI'
_rbcrecsize = calcsize(_rbcrecfmt)
_rbcmininc = 64 * _rbcrecsize
_rbcnodelen = 4
_rbcbranchidxmask = 0x7FFFFFFF
_rbccloseflag = 0x80000000
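

# Illustrative sketch, not part of the original file: one rbc-revs record
# round-tripped through the record format defined above. The first 4 bytes
# are a node hash prefix; the other 4 encode the rbc-names index, with
# _rbccloseflag set on branch-closing commits.
def _rbc_record_example():
    node_prefix = b'\xde\xad\xbe\xef'  # hypothetical 4-byte node prefix
    branchidx = 3  # hypothetical index into the rbc-names list
    rec = struct.pack(_rbcrecfmt, node_prefix, branchidx | _rbccloseflag)
    prefix, field = unpack_from(_rbcrecfmt, rec, 0)
    assert prefix == node_prefix
    assert bool(field & _rbccloseflag)  # this record closes its branch
    assert field & _rbcbranchidxmask == branchidx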
632 |
|
632 | |||
633 |
|
633 | |||
634 | class revbranchcache: |
|
634 | class revbranchcache: | |
635 | """Persistent cache, mapping from revision number to branch name and close. |
|
635 | """Persistent cache, mapping from revision number to branch name and close. | |
636 | This is a low level cache, independent of filtering. |
|
636 | This is a low level cache, independent of filtering. | |
637 |
|
637 | |||
638 | Branch names are stored in rbc-names in internal encoding separated by 0. |
|
638 | Branch names are stored in rbc-names in internal encoding separated by 0. | |
639 | rbc-names is append-only, and each branch name is only stored once and will |
|
639 | rbc-names is append-only, and each branch name is only stored once and will | |
640 | thus have a unique index. |
|
640 | thus have a unique index. | |
641 |
|
641 | |||
642 | The branch info for each revision is stored in rbc-revs as constant size |
|
642 | The branch info for each revision is stored in rbc-revs as constant size | |
643 | records. The whole file is read into memory, but it is only 'parsed' on |
|
643 | records. The whole file is read into memory, but it is only 'parsed' on | |
644 | demand. The file is usually append-only but will be truncated if repo |
|
644 | demand. The file is usually append-only but will be truncated if repo | |
645 | modification is detected. |
|
645 | modification is detected. | |
646 | The record for each revision contains the first 4 bytes of the |
|
646 | The record for each revision contains the first 4 bytes of the | |
647 | corresponding node hash, and the record is only used if it still matches. |
|
647 | corresponding node hash, and the record is only used if it still matches. | |
648 | Even a completely trashed rbc-revs fill thus still give the right result |
|
648 | Even a completely trashed rbc-revs fill thus still give the right result | |
649 | while converging towards full recovery ... assuming no incorrectly matching |
|
649 | while converging towards full recovery ... assuming no incorrectly matching | |
650 | node hashes. |
|
650 | node hashes. | |
651 | The record also contains 4 bytes where 31 bits contains the index of the |
|
651 | The record also contains 4 bytes where 31 bits contains the index of the | |
652 | branch and the last bit indicate that it is a branch close commit. |
|
652 | branch and the last bit indicate that it is a branch close commit. | |
653 | The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i |
|
653 | The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i | |
654 | and will grow with it but be 1/8th of its size. |
|
654 | and will grow with it but be 1/8th of its size. | |
655 | """ |
|
655 | """ | |
656 |
|
656 | |||
657 | def __init__(self, repo, readonly=True): |
|
657 | def __init__(self, repo, readonly=True): | |
658 | assert repo.filtername is None |
|
658 | assert repo.filtername is None | |
659 | self._repo = repo |
|
659 | self._repo = repo | |
660 | self._names = [] # branch names in local encoding with static index |
|
660 | self._names = [] # branch names in local encoding with static index | |
661 | self._rbcrevs = bytearray() |
|
661 | self._rbcrevs = bytearray() | |
662 | self._rbcsnameslen = 0 # length of names read at _rbcsnameslen |
|
662 | self._rbcsnameslen = 0 # length of names read at _rbcsnameslen | |
663 | try: |
|
663 | try: | |
664 | bndata = repo.cachevfs.read(_rbcnames) |
|
664 | bndata = repo.cachevfs.read(_rbcnames) | |
665 | self._rbcsnameslen = len(bndata) # for verification before writing |
|
665 | self._rbcsnameslen = len(bndata) # for verification before writing | |
666 | if bndata: |
|
666 | if bndata: | |
667 | self._names = [ |
|
667 | self._names = [ | |
668 | encoding.tolocal(bn) for bn in bndata.split(b'\0') |
|
668 | encoding.tolocal(bn) for bn in bndata.split(b'\0') | |
669 | ] |
|
669 | ] | |
670 | except (IOError, OSError): |
|
670 | except (IOError, OSError): | |
671 | if readonly: |
|
671 | if readonly: | |
672 | # don't try to use cache - fall back to the slow path |
|
672 | # don't try to use cache - fall back to the slow path | |
673 | self.branchinfo = self._branchinfo |
|
673 | self.branchinfo = self._branchinfo | |
674 |
|
674 | |||
675 | if self._names: |
|
675 | if self._names: | |
676 | try: |
|
676 | try: | |
677 | data = repo.cachevfs.read(_rbcrevs) |
|
677 | data = repo.cachevfs.read(_rbcrevs) | |
678 | self._rbcrevs[:] = data |
|
678 | self._rbcrevs[:] = data | |
679 | except (IOError, OSError) as inst: |
|
679 | except (IOError, OSError) as inst: | |
680 | repo.ui.debug( |
|
680 | repo.ui.debug( | |
681 | b"couldn't read revision branch cache: %s\n" |
|
681 | b"couldn't read revision branch cache: %s\n" | |
682 | % stringutil.forcebytestr(inst) |
|
682 | % stringutil.forcebytestr(inst) | |
683 | ) |
|
683 | ) | |
684 | # remember number of good records on disk |
|
684 | # remember number of good records on disk | |
685 | self._rbcrevslen = min( |
|
685 | self._rbcrevslen = min( | |
686 | len(self._rbcrevs) // _rbcrecsize, len(repo.changelog) |
|
686 | len(self._rbcrevs) // _rbcrecsize, len(repo.changelog) | |
687 | ) |
|
687 | ) | |
688 | if self._rbcrevslen == 0: |
|
688 | if self._rbcrevslen == 0: | |
689 | self._names = [] |
|
689 | self._names = [] | |
690 | self._rbcnamescount = len(self._names) # number of names read at |
|
690 | self._rbcnamescount = len(self._names) # number of names read at | |
691 | # _rbcsnameslen |
|
691 | # _rbcsnameslen | |
692 |
|
692 | |||
693 | def _clear(self): |
|
693 | def _clear(self): | |
694 | self._rbcsnameslen = 0 |
|
694 | self._rbcsnameslen = 0 | |
695 | del self._names[:] |
|
695 | del self._names[:] | |
696 | self._rbcnamescount = 0 |
|
696 | self._rbcnamescount = 0 | |
697 | self._rbcrevslen = len(self._repo.changelog) |
|
697 | self._rbcrevslen = len(self._repo.changelog) | |
698 | self._rbcrevs = bytearray(self._rbcrevslen * _rbcrecsize) |
|
698 | self._rbcrevs = bytearray(self._rbcrevslen * _rbcrecsize) | |
699 | util.clearcachedproperty(self, b'_namesreverse') |
|
699 | util.clearcachedproperty(self, b'_namesreverse') | |
700 |
|
700 | |||
701 | @util.propertycache |
|
701 | @util.propertycache | |
702 | def _namesreverse(self): |
|
702 | def _namesreverse(self): | |
703 | return {b: r for r, b in enumerate(self._names)} |
|
703 | return {b: r for r, b in enumerate(self._names)} | |
704 |
|
704 | |||
705 | def branchinfo(self, rev): |
|
705 | def branchinfo(self, rev): | |
706 | """Return branch name and close flag for rev, using and updating |
|
706 | """Return branch name and close flag for rev, using and updating | |
707 | persistent cache.""" |
|
707 | persistent cache.""" | |
708 | changelog = self._repo.changelog |
|
708 | changelog = self._repo.changelog | |
709 | rbcrevidx = rev * _rbcrecsize |
|
709 | rbcrevidx = rev * _rbcrecsize | |
710 |
|
710 | |||
711 | # avoid negative index, changelog.read(nullrev) is fast without cache |
|
711 | # avoid negative index, changelog.read(nullrev) is fast without cache | |
712 | if rev == nullrev: |
|
712 | if rev == nullrev: | |
713 | return changelog.branchinfo(rev) |
|
713 | return changelog.branchinfo(rev) | |
714 |
|
714 | |||
715 | # if requested rev isn't allocated, grow and cache the rev info |
|
715 | # if requested rev isn't allocated, grow and cache the rev info | |
716 | if len(self._rbcrevs) < rbcrevidx + _rbcrecsize: |
|
716 | if len(self._rbcrevs) < rbcrevidx + _rbcrecsize: | |
717 | return self._branchinfo(rev) |
|
717 | return self._branchinfo(rev) | |
718 |
|
718 | |||
719 | # fast path: extract data from cache, use it if node is matching |
|
719 | # fast path: extract data from cache, use it if node is matching | |
720 | reponode = changelog.node(rev)[:_rbcnodelen] |
|
720 | reponode = changelog.node(rev)[:_rbcnodelen] | |
721 | cachenode, branchidx = unpack_from( |
|
721 | cachenode, branchidx = unpack_from( | |
722 | _rbcrecfmt, util.buffer(self._rbcrevs), rbcrevidx |
|
722 | _rbcrecfmt, util.buffer(self._rbcrevs), rbcrevidx | |
723 | ) |
|
723 | ) | |
724 | close = bool(branchidx & _rbccloseflag) |
|
724 | close = bool(branchidx & _rbccloseflag) | |
725 | if close: |
|
725 | if close: | |
726 | branchidx &= _rbcbranchidxmask |
|
726 | branchidx &= _rbcbranchidxmask | |
727 | if cachenode == b'\0\0\0\0': |
|
727 | if cachenode == b'\0\0\0\0': | |
728 | pass |
|
728 | pass | |
729 | elif cachenode == reponode: |
|
729 | elif cachenode == reponode: | |
730 | try: |
|
730 | try: | |
731 | return self._names[branchidx], close |
|
731 | return self._names[branchidx], close | |
732 | except IndexError: |
|
732 | except IndexError: | |
733 | # recover from invalid reference to unknown branch |
|
733 | # recover from invalid reference to unknown branch | |
734 | self._repo.ui.debug( |
|
734 | self._repo.ui.debug( | |
735 | b"referenced branch names not found" |
|
735 | b"referenced branch names not found" | |
736 | b" - rebuilding revision branch cache from scratch\n" |
|
736 | b" - rebuilding revision branch cache from scratch\n" | |
737 | ) |
|
737 | ) | |
738 | self._clear() |
|
738 | self._clear() | |
739 | else: |
|
739 | else: | |
740 | # rev/node map has changed, invalidate the cache from here up |
|
740 | # rev/node map has changed, invalidate the cache from here up | |
741 | self._repo.ui.debug( |
|
741 | self._repo.ui.debug( | |
742 | b"history modification detected - truncating " |
|
742 | b"history modification detected - truncating " | |
743 | b"revision branch cache to revision %d\n" % rev |
|
743 | b"revision branch cache to revision %d\n" % rev | |
744 | ) |
|
744 | ) | |
745 | truncate = rbcrevidx + _rbcrecsize |
|
745 | truncate = rbcrevidx + _rbcrecsize | |
746 | del self._rbcrevs[truncate:] |
|
746 | del self._rbcrevs[truncate:] | |
747 | self._rbcrevslen = min(self._rbcrevslen, truncate) |
|
747 | self._rbcrevslen = min(self._rbcrevslen, truncate) | |
748 |
|
748 | |||
749 | # fall back to slow path and make sure it will be written to disk |
|
749 | # fall back to slow path and make sure it will be written to disk | |
750 | return self._branchinfo(rev) |
|
750 | return self._branchinfo(rev) | |
751 |
|
751 | |||
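Editorial aside on the fast path above: each cache record is a truncated node plus a uint32 whose top bit doubles as the close flag. The format constants are defined earlier in this file; the sketch below hardcodes the usual values ('>4sI', a 4-byte node prefix, close flag in bit 31) as assumptions rather than quoting them, so treat it as an illustration, not the module's code.

    import struct

    # Assumed values, mirroring _rbcrecfmt / _rbccloseflag / _rbcbranchidxmask.
    RECFMT = '>4sI'            # 4-byte node prefix + big-endian uint32 index
    CLOSEFLAG = 0x80000000     # top bit marks a branch-closing changeset
    IDXMASK = 0x7FFFFFFF       # remaining bits index into the names list

    def decode_record(buf, rev):
        # Unpack one record the way branchinfo() does on its fast path.
        recsize = struct.calcsize(RECFMT)
        cachenode, branchidx = struct.unpack_from(RECFMT, buf, rev * recsize)
        close = bool(branchidx & CLOSEFLAG)
        return cachenode, branchidx & IDXMASK, close

    rec = struct.pack(RECFMT, b'\xde\xad\xbe\xef', 5 | CLOSEFLAG)
    assert decode_record(rec, 0) == (b'\xde\xad\xbe\xef', 5, True)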
752 | def _branchinfo(self, rev): |
|
752 | def _branchinfo(self, rev): | |
753 | """Retrieve branch info from changelog and update _rbcrevs""" |
|
753 | """Retrieve branch info from changelog and update _rbcrevs""" | |
754 | changelog = self._repo.changelog |
|
754 | changelog = self._repo.changelog | |
755 | b, close = changelog.branchinfo(rev) |
|
755 | b, close = changelog.branchinfo(rev) | |
756 | if b in self._namesreverse: |
|
756 | if b in self._namesreverse: | |
757 | branchidx = self._namesreverse[b] |
|
757 | branchidx = self._namesreverse[b] | |
758 | else: |
|
758 | else: | |
759 | branchidx = len(self._names) |
|
759 | branchidx = len(self._names) | |
760 | self._names.append(b) |
|
760 | self._names.append(b) | |
761 | self._namesreverse[b] = branchidx |
|
761 | self._namesreverse[b] = branchidx | |
762 | reponode = changelog.node(rev) |
|
762 | reponode = changelog.node(rev) | |
763 | if close: |
|
763 | if close: | |
764 | branchidx |= _rbccloseflag |
|
764 | branchidx |= _rbccloseflag | |
765 | self._setcachedata(rev, reponode, branchidx) |
|
765 | self._setcachedata(rev, reponode, branchidx) | |
766 | return b, close |
|
766 | return b, close | |
767 |
|
767 | |||
768 | def setdata(self, rev, changelogrevision): |
|
768 | def setdata(self, rev, changelogrevision): | |
769 | """add new data information to the cache""" |
|
769 | """add new data information to the cache""" | |
770 | branch, close = changelogrevision.branchinfo |
|
770 | branch, close = changelogrevision.branchinfo | |
771 |
|
771 | |||
772 | if branch in self._namesreverse: |
|
772 | if branch in self._namesreverse: | |
773 | branchidx = self._namesreverse[branch] |
|
773 | branchidx = self._namesreverse[branch] | |
774 | else: |
|
774 | else: | |
775 | branchidx = len(self._names) |
|
775 | branchidx = len(self._names) | |
776 | self._names.append(branch) |
|
776 | self._names.append(branch) | |
777 | self._namesreverse[branch] = branchidx |
|
777 | self._namesreverse[branch] = branchidx | |
778 | if close: |
|
778 | if close: | |
779 | branchidx |= _rbccloseflag |
|
779 | branchidx |= _rbccloseflag | |
780 | self._setcachedata(rev, self._repo.changelog.node(rev), branchidx) |
|
780 | self._setcachedata(rev, self._repo.changelog.node(rev), branchidx) | |
781 | # If no cache data were readable (missing file, bad permissions, etc.) |
|
781 | # If no cache data were readable (missing file, bad permissions, etc.) | |
782 | # the cache was bypassing itself by setting: |
|
782 | # the cache was bypassing itself by setting: | |
783 | # |
|
783 | # | |
784 | # self.branchinfo = self._branchinfo |
|
784 | # self.branchinfo = self._branchinfo | |
785 | # |
|
785 | # | |
786 | # Since we now have data in the cache, we need to drop this bypassing. |
|
786 | # Since we now have data in the cache, we need to drop this bypassing. | |
787 | if 'branchinfo' in vars(self): |
|
787 | if 'branchinfo' in vars(self): | |
788 | del self.branchinfo |
|
788 | del self.branchinfo | |
789 |
|
789 | |||
790 | def _setcachedata(self, rev, node, branchidx): |
|
790 | def _setcachedata(self, rev, node, branchidx): | |
791 | """Writes the node's branch data to the in-memory cache data.""" |
|
791 | """Writes the node's branch data to the in-memory cache data.""" | |
792 | if rev == nullrev: |
|
792 | if rev == nullrev: | |
793 | return |
|
793 | return | |
794 | rbcrevidx = rev * _rbcrecsize |
|
794 | rbcrevidx = rev * _rbcrecsize | |
795 | requiredsize = rbcrevidx + _rbcrecsize |
|
795 | requiredsize = rbcrevidx + _rbcrecsize | |
796 | rbccur = len(self._rbcrevs) |
|
796 | rbccur = len(self._rbcrevs) | |
797 | if rbccur < requiredsize: |
|
797 | if rbccur < requiredsize: | |
798 | # bytearray doesn't allocate extra space at least in Python 3.7. |
|
798 | # bytearray doesn't allocate extra space at least in Python 3.7. | |
799 | # When multiple changesets are added in a row, precise resize would |
|
799 | # When multiple changesets are added in a row, precise resize would | |
800 | # result in quadratic complexity. Overallocate to compensate by |
|
800 | # result in quadratic complexity. Overallocate to compensate by | |
801 | # using the classic doubling technique for dynamic arrays instead. |
|
801 | # using the classic doubling technique for dynamic arrays instead. | |
802 | # If there was a gap in the map before, less space will be reserved. |
|
802 | # If there was a gap in the map before, less space will be reserved. | |
803 | self._rbcrevs.extend(b'\0' * max(_rbcmininc, requiredsize)) |
|
803 | self._rbcrevs.extend(b'\0' * max(_rbcmininc, requiredsize)) | |
804 | pack_into(_rbcrecfmt, self._rbcrevs, rbcrevidx, node, branchidx) |
|
804 | pack_into(_rbcrecfmt, self._rbcrevs, rbcrevidx, node, branchidx) | |
805 | self._rbcrevslen = min(self._rbcrevslen, rev) |
|
805 | self._rbcrevslen = min(self._rbcrevslen, rev) | |
806 |
|
806 | |||
807 | tr = self._repo.currenttransaction() |
|
807 | tr = self._repo.currenttransaction() | |
808 | if tr: |
|
808 | if tr: | |
809 | tr.addfinalize(b'write-revbranchcache', self.write) |
|
809 | tr.addfinalize(b'write-revbranchcache', self.write) | |
810 |
|
810 | |||
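Editorial note on the overallocation comment in _setcachedata: extending the bytearray by exactly one record per appended changeset would copy the buffer on every append. A simplified model of the growth policy follows; the 64-record floor stands in for _rbcmininc, whose real value is defined earlier in this file and is an assumption here.

    def grow(buf, requiredsize, minincrement=64 * 8):
        # Extend by max(minincrement, requiredsize): repeated one-record
        # appends then trigger only O(log n) reallocations, not O(n).
        if len(buf) < requiredsize:
            buf.extend(b'\0' * max(minincrement, requiredsize))

    buf = bytearray()
    for rev in range(10000):
        grow(buf, (rev + 1) * 8)   # one 8-byte record per revision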
811 | def write(self, tr=None): |
|
811 | def write(self, tr=None): | |
812 | """Save branch cache if it is dirty.""" |
|
812 | """Save branch cache if it is dirty.""" | |
813 | repo = self._repo |
|
813 | repo = self._repo | |
814 | wlock = None |
|
814 | wlock = None | |
815 | step = b'' |
|
815 | step = b'' | |
816 | try: |
|
816 | try: | |
817 | # write the new names |
|
817 | # write the new names | |
818 | if self._rbcnamescount < len(self._names): |
|
818 | if self._rbcnamescount < len(self._names): | |
819 | wlock = repo.wlock(wait=False) |
|
819 | wlock = repo.wlock(wait=False) | |
820 | step = b' names' |
|
820 | step = b' names' | |
821 | self._writenames(repo) |
|
821 | self._writenames(repo) | |
822 |
|
822 | |||
823 | # write the new revs |
|
823 | # write the new revs | |
824 | start = self._rbcrevslen * _rbcrecsize |
|
824 | start = self._rbcrevslen * _rbcrecsize | |
825 | if start != len(self._rbcrevs): |
|
825 | if start != len(self._rbcrevs): | |
826 | step = b'' |
|
826 | step = b'' | |
827 | if wlock is None: |
|
827 | if wlock is None: | |
828 | wlock = repo.wlock(wait=False) |
|
828 | wlock = repo.wlock(wait=False) | |
829 | self._writerevs(repo, start) |
|
829 | self._writerevs(repo, start) | |
830 |
|
830 | |||
831 | except (IOError, OSError, error.Abort, error.LockError) as inst: |
|
831 | except (IOError, OSError, error.Abort, error.LockError) as inst: | |
832 | repo.ui.debug( |
|
832 | repo.ui.debug( | |
833 | b"couldn't write revision branch cache%s: %s\n" |
|
833 | b"couldn't write revision branch cache%s: %s\n" | |
834 | % (step, stringutil.forcebytestr(inst)) |
|
834 | % (step, stringutil.forcebytestr(inst)) | |
835 | ) |
|
835 | ) | |
836 | finally: |
|
836 | finally: | |
837 | if wlock is not None: |
|
837 | if wlock is not None: | |
838 | wlock.release() |
|
838 | wlock.release() | |
839 |
|
839 | |||
840 | def _writenames(self, repo): |
|
840 | def _writenames(self, repo): | |
841 | """write the new branch names to revbranchcache""" |
|
841 | """write the new branch names to revbranchcache""" | |
842 | if self._rbcnamescount != 0: |
|
842 | if self._rbcnamescount != 0: | |
843 | f = repo.cachevfs.open(_rbcnames, b'ab') |
|
843 | f = repo.cachevfs.open(_rbcnames, b'ab') | |
844 | if f.tell() == self._rbcsnameslen: |
|
844 | if f.tell() == self._rbcsnameslen: | |
845 | f.write(b'\0') |
|
845 | f.write(b'\0') | |
846 | else: |
|
846 | else: | |
847 | f.close() |
|
847 | f.close() | |
848 | repo.ui.debug(b"%s changed - rewriting it\n" % _rbcnames) |
|
848 | repo.ui.debug(b"%s changed - rewriting it\n" % _rbcnames) | |
849 | self._rbcnamescount = 0 |
|
849 | self._rbcnamescount = 0 | |
850 | self._rbcrevslen = 0 |
|
850 | self._rbcrevslen = 0 | |
851 | if self._rbcnamescount == 0: |
|
851 | if self._rbcnamescount == 0: | |
852 | # before rewriting names, make sure references are removed |
|
852 | # before rewriting names, make sure references are removed | |
853 | repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True) |
|
853 | repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True) | |
854 | f = repo.cachevfs.open(_rbcnames, b'wb') |
|
854 | f = repo.cachevfs.open(_rbcnames, b'wb') | |
855 | f.write( |
|
855 | f.write( | |
856 | b'\0'.join( |
|
856 | b'\0'.join( | |
857 | encoding.fromlocal(b) |
|
857 | encoding.fromlocal(b) | |
858 | for b in self._names[self._rbcnamescount :] |
|
858 | for b in self._names[self._rbcnamescount :] | |
859 | ) |
|
859 | ) | |
860 | ) |
|
860 | ) | |
861 | self._rbcsnameslen = f.tell() |
|
861 | self._rbcsnameslen = f.tell() | |
862 | f.close() |
|
862 | f.close() | |
863 | self._rbcnamescount = len(self._names) |
|
863 | self._rbcnamescount = len(self._names) | |
864 |
|
864 | |||
865 | def _writerevs(self, repo, start): |
|
865 | def _writerevs(self, repo, start): | |
866 | """write the new revs to revbranchcache""" |
|
866 | """write the new revs to revbranchcache""" | |
867 | revs = min(len(repo.changelog), len(self._rbcrevs) // _rbcrecsize) |
|
867 | revs = min(len(repo.changelog), len(self._rbcrevs) // _rbcrecsize) | |
868 | with repo.cachevfs.open(_rbcrevs, b'ab') as f: |
|
868 | with repo.cachevfs.open(_rbcrevs, b'ab') as f: | |
869 | if f.tell() != start: |
|
869 | if f.tell() != start: | |
870 | repo.ui.debug( |
|
870 | repo.ui.debug( | |
871 | b"truncating cache/%s to %d\n" % (_rbcrevs, start) |
|
871 | b"truncating cache/%s to %d\n" % (_rbcrevs, start) | |
872 | ) |
|
872 | ) | |
873 | f.seek(start) |
|
873 | f.seek(start) | |
874 | if f.tell() != start: |
|
874 | if f.tell() != start: | |
875 | start = 0 |
|
875 | start = 0 | |
876 | f.seek(start) |
|
876 | f.seek(start) | |
877 | f.truncate() |
|
877 | f.truncate() | |
878 | end = revs * _rbcrecsize |
|
878 | end = revs * _rbcrecsize | |
879 | f.write(self._rbcrevs[start:end]) |
|
879 | f.write(self._rbcrevs[start:end]) | |
880 | self._rbcrevslen = revs |
|
880 | self._rbcrevslen = revs |
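One last remark on _writerevs before the next file: the revs file is opened for append, but if its on-disk length no longer matches the prefix the in-memory state considers valid, the code rewrites everything from offset zero. A standalone sketch of just that decision, assuming a plain file object opened in 'r+b' mode rather than the real vfs handle:

    def append_or_rewrite(f, data, start):
        # Write data[start:] at offset `start`; if the file is not `start`
        # bytes long (someone truncated or grew it behind our back), fall
        # back to rewriting the whole file from offset 0.
        f.seek(0, 2)               # find the real end of file
        if f.tell() != start:
            start = 0
            f.seek(0)
            f.truncate()
        f.write(data[start:])
        return start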
@@ -1,1149 +1,1147 | |||||
1 | # obsolete.py - obsolete markers handling |
|
1 | # obsolete.py - obsolete markers handling | |
2 | # |
|
2 | # | |
3 | # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org> |
|
3 | # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org> | |
4 | # Logilab SA <contact@logilab.fr> |
|
4 | # Logilab SA <contact@logilab.fr> | |
5 | # |
|
5 | # | |
6 | # This software may be used and distributed according to the terms of the |
|
6 | # This software may be used and distributed according to the terms of the | |
7 | # GNU General Public License version 2 or any later version. |
|
7 | # GNU General Public License version 2 or any later version. | |
8 |
|
8 | |||
9 | """Obsolete marker handling |
|
9 | """Obsolete marker handling | |
10 |
|
10 | |||
11 | An obsolete marker maps an old changeset to a list of new |
|
11 | An obsolete marker maps an old changeset to a list of new | |
12 | changesets. If the list of new changesets is empty, the old changeset |
|
12 | changesets. If the list of new changesets is empty, the old changeset | |
13 | is said to be "killed". Otherwise, the old changeset is being |
|
13 | is said to be "killed". Otherwise, the old changeset is being | |
14 | "replaced" by the new changesets. |
|
14 | "replaced" by the new changesets. | |
15 |
|
15 | |||
16 | Obsolete markers can be used to record and distribute changeset graph |
|
16 | Obsolete markers can be used to record and distribute changeset graph | |
17 | transformations performed by history rewrite operations, and help |
|
17 | transformations performed by history rewrite operations, and help | |
18 | building new tools to reconcile conflicting rewrite actions. To |
|
18 | building new tools to reconcile conflicting rewrite actions. To | |
19 | facilitate conflict resolution, markers include various annotations |
|
19 | facilitate conflict resolution, markers include various annotations | |
20 | besides old and new changeset identifiers, such as creation date or |
|
20 | besides old and new changeset identifiers, such as creation date or | |
21 | author name. |
|
21 | author name. | |
22 |
|
22 | |||
23 | The old obsoleted changeset is called a "predecessor" and possible |
|
23 | The old obsoleted changeset is called a "predecessor" and possible | |
24 | replacements are called "successors". Markers that used changeset X as |
|
24 | replacements are called "successors". Markers that used changeset X as | |
25 | a predecessor are called "successor markers of X" because they hold |
|
25 | a predecessor are called "successor markers of X" because they hold | |
26 | information about the successors of X. Markers that use changeset Y as |
|
26 | information about the successors of X. Markers that use changeset Y as | |
27 | a successors are call "predecessor markers of Y" because they hold |
|
27 | a successors are call "predecessor markers of Y" because they hold | |
28 | information about the predecessors of Y. |
|
28 | information about the predecessors of Y. | |
29 |
|
29 | |||
30 | Examples: |
|
30 | Examples: | |
31 |
|
31 | |||
32 | - When changeset A is replaced by changeset A', one marker is stored: |
|
32 | - When changeset A is replaced by changeset A', one marker is stored: | |
33 |
|
33 | |||
34 | (A, (A',)) |
|
34 | (A, (A',)) | |
35 |
|
35 | |||
36 | - When changesets A and B are folded into a new changeset C, two markers are |
|
36 | - When changesets A and B are folded into a new changeset C, two markers are | |
37 | stored: |
|
37 | stored: | |
38 |
|
38 | |||
39 | (A, (C,)) and (B, (C,)) |
|
39 | (A, (C,)) and (B, (C,)) | |
40 |
|
40 | |||
41 | - When changeset A is simply "pruned" from the graph, a marker is created: |
|
41 | - When changeset A is simply "pruned" from the graph, a marker is created: | |
42 |
|
42 | |||
43 | (A, ()) |
|
43 | (A, ()) | |
44 |
|
44 | |||
45 | - When changeset A is split into B and C, a single marker is used: |
|
45 | - When changeset A is split into B and C, a single marker is used: | |
46 |
|
46 | |||
47 | (A, (B, C)) |
|
47 | (A, (B, C)) | |
48 |
|
48 | |||
49 | We use a single marker to distinguish the "split" case from the "divergence" |
|
49 | We use a single marker to distinguish the "split" case from the "divergence" | |
50 | case. If two independent operations rewrite the same changeset A into A' and |
|
50 | case. If two independent operations rewrite the same changeset A into A' and | |
51 | A'', we have an error case: divergent rewriting. We can detect it because |
|
51 | A'', we have an error case: divergent rewriting. We can detect it because | |
52 | two markers will be created independently: |
|
52 | two markers will be created independently: | |
53 |
|
53 | |||
54 | (A, (B,)) and (A, (C,)) |
|
54 | (A, (B,)) and (A, (C,)) | |
55 |
|
55 | |||
56 | Format |
|
56 | Format | |
57 | ------ |
|
57 | ------ | |
58 |
|
58 | |||
59 | Markers are stored in an append-only file stored in |
|
59 | Markers are stored in an append-only file stored in | |
60 | '.hg/store/obsstore'. |
|
60 | '.hg/store/obsstore'. | |
61 |
|
61 | |||
62 | The file starts with a version header: |
|
62 | The file starts with a version header: | |
63 |
|
63 | |||
64 | - 1 unsigned byte: version number, starting at zero. |
|
64 | - 1 unsigned byte: version number, starting at zero. | |
65 |
|
65 | |||
66 | The header is followed by the markers. The marker format depends on the version. See |
|
66 | The header is followed by the markers. The marker format depends on the version. See | |
67 | the comment associated with each format for details. |
|
67 | the comment associated with each format for details. | |
68 |
|
68 | |||
69 | """ |
|
69 | """ | |
70 |
|
70 | |||
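To make the docstring examples concrete: in memory each marker is the six-field tuple documented further down in this file, (prec, succs, flag, meta, date, parents). A hand-rolled illustration with fake 20-byte node ids, not real changeset hashes:

    # Fake 20-byte node ids, for illustration only.
    A, B, C = (bytes([i]) * 20 for i in (1, 2, 3))

    prune = (A, (), 0, (), (0.0, 0), None)      # A killed: no successors
    split = (A, (B, C), 0, (), (0.0, 0), None)  # one marker: A split into B and C
    divergence = [                              # two markers: divergent rewrite of A
        (A, (B,), 0, (), (0.0, 0), None),
        (A, (C,), 0, (), (0.0, 0), None),
    ]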
71 | import binascii |
|
71 | import binascii | |
72 | import errno |
|
72 | import errno | |
73 | import struct |
|
73 | import struct | |
74 |
|
74 | |||
75 | from .i18n import _ |
|
75 | from .i18n import _ | |
76 | from .pycompat import getattr |
|
76 | from .pycompat import getattr | |
77 | from .node import ( |
|
77 | from .node import ( | |
78 | bin, |
|
78 | bin, | |
79 | hex, |
|
79 | hex, | |
80 | ) |
|
80 | ) | |
81 | from . import ( |
|
81 | from . import ( | |
82 | encoding, |
|
82 | encoding, | |
83 | error, |
|
83 | error, | |
84 | obsutil, |
|
84 | obsutil, | |
85 | phases, |
|
85 | phases, | |
86 | policy, |
|
86 | policy, | |
87 | pycompat, |
|
87 | pycompat, | |
88 | util, |
|
88 | util, | |
89 | ) |
|
89 | ) | |
90 | from .utils import ( |
|
90 | from .utils import ( | |
91 | dateutil, |
|
91 | dateutil, | |
92 | hashutil, |
|
92 | hashutil, | |
93 | ) |
|
93 | ) | |
94 |
|
94 | |||
95 | parsers = policy.importmod('parsers') |
|
95 | parsers = policy.importmod('parsers') | |
96 |
|
96 | |||
97 | _pack = struct.pack |
|
97 | _pack = struct.pack | |
98 | _unpack = struct.unpack |
|
98 | _unpack = struct.unpack | |
99 | _calcsize = struct.calcsize |
|
99 | _calcsize = struct.calcsize | |
100 | propertycache = util.propertycache |
|
100 | propertycache = util.propertycache | |
101 |
|
101 | |||
102 | # Options for obsolescence |
|
102 | # Options for obsolescence | |
103 | createmarkersopt = b'createmarkers' |
|
103 | createmarkersopt = b'createmarkers' | |
104 | allowunstableopt = b'allowunstable' |
|
104 | allowunstableopt = b'allowunstable' | |
105 | allowdivergenceopt = b'allowdivergence' |
|
105 | allowdivergenceopt = b'allowdivergence' | |
106 | exchangeopt = b'exchange' |
|
106 | exchangeopt = b'exchange' | |
107 |
|
107 | |||
108 |
|
108 | |||
109 | def _getoptionvalue(repo, option): |
|
109 | def _getoptionvalue(repo, option): | |
110 | """Returns True if the given repository has the given obsolete option |
|
110 | """Returns True if the given repository has the given obsolete option | |
111 | enabled. |
|
111 | enabled. | |
112 | """ |
|
112 | """ | |
113 | configkey = b'evolution.%s' % option |
|
113 | configkey = b'evolution.%s' % option | |
114 | newconfig = repo.ui.configbool(b'experimental', configkey) |
|
114 | newconfig = repo.ui.configbool(b'experimental', configkey) | |
115 |
|
115 | |||
116 | # Return the value only if defined |
|
116 | # Return the value only if defined | |
117 | if newconfig is not None: |
|
117 | if newconfig is not None: | |
118 | return newconfig |
|
118 | return newconfig | |
119 |
|
119 | |||
120 | # Fall back to the generic option |
|
120 | # Fall back to the generic option | |
121 | try: |
|
121 | try: | |
122 | return repo.ui.configbool(b'experimental', b'evolution') |
|
122 | return repo.ui.configbool(b'experimental', b'evolution') | |
123 | except (error.ConfigError, AttributeError): |
|
123 | except (error.ConfigError, AttributeError): | |
124 | # Fall back to the old-style config |
|
124 | # Fall back to the old-style config | |
125 | # inconsistent config: experimental.evolution |
|
125 | # inconsistent config: experimental.evolution | |
126 | result = set(repo.ui.configlist(b'experimental', b'evolution')) |
|
126 | result = set(repo.ui.configlist(b'experimental', b'evolution')) | |
127 |
|
127 | |||
128 | if b'all' in result: |
|
128 | if b'all' in result: | |
129 | return True |
|
129 | return True | |
130 |
|
130 | |||
131 | # Temporary hack for next check |
|
131 | # Temporary hack for next check | |
132 | newconfig = repo.ui.config(b'experimental', b'evolution.createmarkers') |
|
132 | newconfig = repo.ui.config(b'experimental', b'evolution.createmarkers') | |
133 | if newconfig: |
|
133 | if newconfig: | |
134 | result.add(b'createmarkers') |
|
134 | result.add(b'createmarkers') | |
135 |
|
135 | |||
136 | return option in result |
|
136 | return option in result | |
137 |
|
137 | |||
138 |
|
138 | |||
139 | def getoptions(repo): |
|
139 | def getoptions(repo): | |
140 | """Returns dicts showing state of obsolescence features.""" |
|
140 | """Returns dicts showing state of obsolescence features.""" | |
141 |
|
141 | |||
142 | createmarkersvalue = _getoptionvalue(repo, createmarkersopt) |
|
142 | createmarkersvalue = _getoptionvalue(repo, createmarkersopt) | |
143 | if createmarkersvalue: |
|
143 | if createmarkersvalue: | |
144 | unstablevalue = _getoptionvalue(repo, allowunstableopt) |
|
144 | unstablevalue = _getoptionvalue(repo, allowunstableopt) | |
145 | divergencevalue = _getoptionvalue(repo, allowdivergenceopt) |
|
145 | divergencevalue = _getoptionvalue(repo, allowdivergenceopt) | |
146 | exchangevalue = _getoptionvalue(repo, exchangeopt) |
|
146 | exchangevalue = _getoptionvalue(repo, exchangeopt) | |
147 | else: |
|
147 | else: | |
148 | # if we cannot create obsolescence markers, we shouldn't exchange them |
|
148 | # if we cannot create obsolescence markers, we shouldn't exchange them | |
149 | # or perform operations that lead to instability or divergence |
|
149 | # or perform operations that lead to instability or divergence | |
150 | unstablevalue = False |
|
150 | unstablevalue = False | |
151 | divergencevalue = False |
|
151 | divergencevalue = False | |
152 | exchangevalue = False |
|
152 | exchangevalue = False | |
153 |
|
153 | |||
154 | return { |
|
154 | return { | |
155 | createmarkersopt: createmarkersvalue, |
|
155 | createmarkersopt: createmarkersvalue, | |
156 | allowunstableopt: unstablevalue, |
|
156 | allowunstableopt: unstablevalue, | |
157 | allowdivergenceopt: divergencevalue, |
|
157 | allowdivergenceopt: divergencevalue, | |
158 | exchangeopt: exchangevalue, |
|
158 | exchangeopt: exchangevalue, | |
159 | } |
|
159 | } | |
160 |
|
160 | |||
161 |
|
161 | |||
162 | def isenabled(repo, option): |
|
162 | def isenabled(repo, option): | |
163 | """Returns True if the given repository has the given obsolete option |
|
163 | """Returns True if the given repository has the given obsolete option | |
164 | enabled. |
|
164 | enabled. | |
165 | """ |
|
165 | """ | |
166 | return getoptions(repo)[option] |
|
166 | return getoptions(repo)[option] | |
167 |
|
167 | |||
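A typical caller checks this gate before trying to write markers; a hypothetical helper built on the two functions above:

    from mercurial import obsolete

    def can_rewrite(repo):
        # Hypothetical helper: history-rewriting tools should refuse to
        # run unless marker creation is enabled for this repository.
        return obsolete.isenabled(repo, obsolete.createmarkersopt)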
168 |
|
168 | |||
169 | # Creating aliases for marker flags because evolve extension looks for |
|
169 | # Creating aliases for marker flags because evolve extension looks for | |
170 | # bumpedfix in obsolete.py |
|
170 | # bumpedfix in obsolete.py | |
171 | bumpedfix = obsutil.bumpedfix |
|
171 | bumpedfix = obsutil.bumpedfix | |
172 | usingsha256 = obsutil.usingsha256 |
|
172 | usingsha256 = obsutil.usingsha256 | |
173 |
|
173 | |||
174 | ## Parsing and writing of version "0" |
|
174 | ## Parsing and writing of version "0" | |
175 | # |
|
175 | # | |
176 | # The header is followed by the markers. Each marker is made of: |
|
176 | # The header is followed by the markers. Each marker is made of: | |
177 | # |
|
177 | # | |
178 | # - 1 uint8 : number of new changesets "N", can be zero. |
|
178 | # - 1 uint8 : number of new changesets "N", can be zero. | |
179 | # |
|
179 | # | |
180 | # - 1 uint32: metadata size "M" in bytes. |
|
180 | # - 1 uint32: metadata size "M" in bytes. | |
181 | # |
|
181 | # | |
182 | # - 1 byte: a bit field. It is reserved for flags used in common |
|
182 | # - 1 byte: a bit field. It is reserved for flags used in common | |
183 | # obsolete marker operations, to avoid repeated decoding of metadata |
|
183 | # obsolete marker operations, to avoid repeated decoding of metadata | |
184 | # entries. |
|
184 | # entries. | |
185 | # |
|
185 | # | |
186 | # - 20 bytes: obsoleted changeset identifier. |
|
186 | # - 20 bytes: obsoleted changeset identifier. | |
187 | # |
|
187 | # | |
188 | # - N*20 bytes: new changesets identifiers. |
|
188 | # - N*20 bytes: new changesets identifiers. | |
189 | # |
|
189 | # | |
190 | # - M bytes: metadata as a sequence of nul-terminated strings. Each |
|
190 | # - M bytes: metadata as a sequence of nul-terminated strings. Each | |
191 | # string contains a key and a value, separated by a colon ':', without |
|
191 | # string contains a key and a value, separated by a colon ':', without | |
192 | # additional encoding. Keys cannot contain '\0' or ':' and values |
|
192 | # additional encoding. Keys cannot contain '\0' or ':' and values | |
193 | # cannot contain '\0'. |
|
193 | # cannot contain '\0'. | |
194 | _fm0version = 0 |
|
194 | _fm0version = 0 | |
195 | _fm0fixed = b'>BIB20s' |
|
195 | _fm0fixed = b'>BIB20s' | |
196 | _fm0node = b'20s' |
|
196 | _fm0node = b'20s' | |
197 | _fm0fsize = _calcsize(_fm0fixed) |
|
197 | _fm0fsize = _calcsize(_fm0fixed) | |
198 | _fm0fnodesize = _calcsize(_fm0node) |
|
198 | _fm0fnodesize = _calcsize(_fm0node) | |
199 |
|
199 | |||
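As a sanity check on the layout comment above: the fixed part of a version-0 marker is 26 bytes, and each node is 20.

    import struct

    # '>BIB20s': uint8 numsuc, uint32 mdsize, uint8 flags, 20-byte predecessor
    assert struct.calcsize('>BIB20s') == 1 + 4 + 1 + 20   # 26 == _fm0fsize
    assert struct.calcsize('20s') == 20                   # _fm0fnodesize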
200 |
|
200 | |||
201 | def _fm0readmarkers(data, off, stop): |
|
201 | def _fm0readmarkers(data, off, stop): | |
202 | # Loop on markers |
|
202 | # Loop on markers | |
203 | while off < stop: |
|
203 | while off < stop: | |
204 | # read fixed part |
|
204 | # read fixed part | |
205 | cur = data[off : off + _fm0fsize] |
|
205 | cur = data[off : off + _fm0fsize] | |
206 | off += _fm0fsize |
|
206 | off += _fm0fsize | |
207 | numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur) |
|
207 | numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur) | |
208 | # read replacement |
|
208 | # read replacement | |
209 | sucs = () |
|
209 | sucs = () | |
210 | if numsuc: |
|
210 | if numsuc: | |
211 | s = _fm0fnodesize * numsuc |
|
211 | s = _fm0fnodesize * numsuc | |
212 | cur = data[off : off + s] |
|
212 | cur = data[off : off + s] | |
213 | sucs = _unpack(_fm0node * numsuc, cur) |
|
213 | sucs = _unpack(_fm0node * numsuc, cur) | |
214 | off += s |
|
214 | off += s | |
215 | # read metadata |
|
215 | # read metadata | |
216 | # (metadata will be decoded on demand) |
|
216 | # (metadata will be decoded on demand) | |
217 | metadata = data[off : off + mdsize] |
|
217 | metadata = data[off : off + mdsize] | |
218 | if len(metadata) != mdsize: |
|
218 | if len(metadata) != mdsize: | |
219 | raise error.Abort( |
|
219 | raise error.Abort( | |
220 | _( |
|
220 | _( | |
221 | b'parsing obsolete marker: metadata is too ' |
|
221 | b'parsing obsolete marker: metadata is too ' | |
222 | b'short, %d bytes expected, got %d' |
|
222 | b'short, %d bytes expected, got %d' | |
223 | ) |
|
223 | ) | |
224 | % (mdsize, len(metadata)) |
|
224 | % (mdsize, len(metadata)) | |
225 | ) |
|
225 | ) | |
226 | off += mdsize |
|
226 | off += mdsize | |
227 | metadata = _fm0decodemeta(metadata) |
|
227 | metadata = _fm0decodemeta(metadata) | |
228 | try: |
|
228 | try: | |
229 | when, offset = metadata.pop(b'date', b'0 0').split(b' ') |
|
229 | when, offset = metadata.pop(b'date', b'0 0').split(b' ') | |
230 | date = float(when), int(offset) |
|
230 | date = float(when), int(offset) | |
231 | except ValueError: |
|
231 | except ValueError: | |
232 | date = (0.0, 0) |
|
232 | date = (0.0, 0) | |
233 | parents = None |
|
233 | parents = None | |
234 | if b'p2' in metadata: |
|
234 | if b'p2' in metadata: | |
235 | parents = (metadata.pop(b'p1', None), metadata.pop(b'p2', None)) |
|
235 | parents = (metadata.pop(b'p1', None), metadata.pop(b'p2', None)) | |
236 | elif b'p1' in metadata: |
|
236 | elif b'p1' in metadata: | |
237 | parents = (metadata.pop(b'p1', None),) |
|
237 | parents = (metadata.pop(b'p1', None),) | |
238 | elif b'p0' in metadata: |
|
238 | elif b'p0' in metadata: | |
239 | parents = () |
|
239 | parents = () | |
240 | if parents is not None: |
|
240 | if parents is not None: | |
241 | try: |
|
241 | try: | |
242 | parents = tuple(bin(p) for p in parents) |
|
242 | parents = tuple(bin(p) for p in parents) | |
243 | # if parent content is not a nodeid, drop the data |
|
243 | # if parent content is not a nodeid, drop the data | |
244 | for p in parents: |
|
244 | for p in parents: | |
245 | if len(p) != 20: |
|
245 | if len(p) != 20: | |
246 | parents = None |
|
246 | parents = None | |
247 | break |
|
247 | break | |
248 | except binascii.Error: |
|
248 | except binascii.Error: | |
249 | # if content cannot be translated to nodeid drop the data. |
|
249 | # if content cannot be translated to nodeid drop the data. | |
250 | parents = None |
|
250 | parents = None | |
251 |
|
251 | |||
252 | metadata = tuple(sorted(metadata.items())) |
|
252 | metadata = tuple(sorted(metadata.items())) | |
253 |
|
253 | |||
254 | yield (pre, sucs, flags, metadata, date, parents) |
|
254 | yield (pre, sucs, flags, metadata, date, parents) | |
255 |
|
255 | |||
256 |
|
256 | |||
257 | def _fm0encodeonemarker(marker): |
|
257 | def _fm0encodeonemarker(marker): | |
258 | pre, sucs, flags, metadata, date, parents = marker |
|
258 | pre, sucs, flags, metadata, date, parents = marker | |
259 | if flags & usingsha256: |
|
259 | if flags & usingsha256: | |
260 | raise error.Abort(_(b'cannot handle sha256 with old obsstore format')) |
|
260 | raise error.Abort(_(b'cannot handle sha256 with old obsstore format')) | |
261 | metadata = dict(metadata) |
|
261 | metadata = dict(metadata) | |
262 | time, tz = date |
|
262 | time, tz = date | |
263 | metadata[b'date'] = b'%r %i' % (time, tz) |
|
263 | metadata[b'date'] = b'%r %i' % (time, tz) | |
264 | if parents is not None: |
|
264 | if parents is not None: | |
265 | if not parents: |
|
265 | if not parents: | |
266 | # mark that we explicitly recorded no parents |
|
266 | # mark that we explicitly recorded no parents | |
267 | metadata[b'p0'] = b'' |
|
267 | metadata[b'p0'] = b'' | |
268 | for i, p in enumerate(parents, 1): |
|
268 | for i, p in enumerate(parents, 1): | |
269 | metadata[b'p%i' % i] = hex(p) |
|
269 | metadata[b'p%i' % i] = hex(p) | |
270 | metadata = _fm0encodemeta(metadata) |
|
270 | metadata = _fm0encodemeta(metadata) | |
271 | numsuc = len(sucs) |
|
271 | numsuc = len(sucs) | |
272 | format = _fm0fixed + (_fm0node * numsuc) |
|
272 | format = _fm0fixed + (_fm0node * numsuc) | |
273 | data = [numsuc, len(metadata), flags, pre] |
|
273 | data = [numsuc, len(metadata), flags, pre] | |
274 | data.extend(sucs) |
|
274 | data.extend(sucs) | |
275 | return _pack(format, *data) + metadata |
|
275 | return _pack(format, *data) + metadata | |
276 |
|
276 | |||
277 |
|
277 | |||
278 | def _fm0encodemeta(meta): |
|
278 | def _fm0encodemeta(meta): | |
279 | """Return encoded metadata string to string mapping. |
|
279 | """Return encoded metadata string to string mapping. | |
280 |
|
280 | |||
281 | Assume no ':' in keys and no '\0' in either keys or values.""" |
|
281 | Assume no ':' in keys and no '\0' in either keys or values.""" | |
282 | for key, value in meta.items(): |
|
282 | for key, value in meta.items(): | |
283 | if b':' in key or b'\0' in key: |
|
283 | if b':' in key or b'\0' in key: | |
284 | raise ValueError(b"':' and '\0' are forbidden in metadata key'") |
|
284 | raise ValueError(b"':' and '\0' are forbidden in metadata key'") | |
285 | if b'\0' in value: |
|
285 | if b'\0' in value: | |
286 | raise ValueError(b"':' is forbidden in metadata value'") |
|
286 | raise ValueError(b"':' is forbidden in metadata value'") | |
287 | return b'\0'.join([b'%s:%s' % (k, meta[k]) for k in sorted(meta)]) |
|
287 | return b'\0'.join([b'%s:%s' % (k, meta[k]) for k in sorted(meta)]) | |
288 |
|
288 | |||
289 |
|
289 | |||
290 | def _fm0decodemeta(data): |
|
290 | def _fm0decodemeta(data): | |
291 | """Return string to string dictionary from encoded version.""" |
|
291 | """Return string to string dictionary from encoded version.""" | |
292 | d = {} |
|
292 | d = {} | |
293 | for l in data.split(b'\0'): |
|
293 | for l in data.split(b'\0'): | |
294 | if l: |
|
294 | if l: | |
295 | key, value = l.split(b':', 1) |
|
295 | key, value = l.split(b':', 1) | |
296 | d[key] = value |
|
296 | d[key] = value | |
297 | return d |
|
297 | return d | |
298 |
|
298 | |||
299 |
|
299 | |||
300 | ## Parsing and writing of version "1" |
|
300 | ## Parsing and writing of version "1" | |
301 | # |
|
301 | # | |
302 | # The header is followed by the markers. Each marker is made of: |
|
302 | # The header is followed by the markers. Each marker is made of: | |
303 | # |
|
303 | # | |
304 | # - uint32: total size of the marker (including this field) |
|
304 | # - uint32: total size of the marker (including this field) | |
305 | # |
|
305 | # | |
306 | # - float64: date in seconds since epoch |
|
306 | # - float64: date in seconds since epoch | |
307 | # |
|
307 | # | |
308 | # - int16: timezone offset in minutes |
|
308 | # - int16: timezone offset in minutes | |
309 | # |
|
309 | # | |
310 | # - uint16: a bit field. It is reserved for flags used in common |
|
310 | # - uint16: a bit field. It is reserved for flags used in common | |
311 | # obsolete marker operations, to avoid repeated decoding of metadata |
|
311 | # obsolete marker operations, to avoid repeated decoding of metadata | |
312 | # entries. |
|
312 | # entries. | |
313 | # |
|
313 | # | |
314 | # - uint8: number of successors "N", can be zero. |
|
314 | # - uint8: number of successors "N", can be zero. | |
315 | # |
|
315 | # | |
316 | # - uint8: number of parents "P", can be zero. |
|
316 | # - uint8: number of parents "P", can be zero. | |
317 | # |
|
317 | # | |
318 | # 0: parents data stored but no parent, |
|
318 | # 0: parents data stored but no parent, | |
319 | # 1: one parent stored, |
|
319 | # 1: one parent stored, | |
320 | # 2: two parents stored, |
|
320 | # 2: two parents stored, | |
321 | # 3: no parent data stored |
|
321 | # 3: no parent data stored | |
322 | # |
|
322 | # | |
323 | # - uint8: number of metadata entries M |
|
323 | # - uint8: number of metadata entries M | |
324 | # |
|
324 | # | |
325 | # - 20 or 32 bytes: predecessor changeset identifier. |
|
325 | # - 20 or 32 bytes: predecessor changeset identifier. | |
326 | # |
|
326 | # | |
327 | # - N*(20 or 32) bytes: successors changesets identifiers. |
|
327 | # - N*(20 or 32) bytes: successors changesets identifiers. | |
328 | # |
|
328 | # | |
329 | # - P*(20 or 32) bytes: parents of the predecessors changesets. |
|
329 | # - P*(20 or 32) bytes: parents of the predecessors changesets. | |
330 | # |
|
330 | # | |
331 | # - M*(uint8, uint8): size of all metadata entries (key and value) |
|
331 | # - M*(uint8, uint8): size of all metadata entries (key and value) | |
332 | # |
|
332 | # | |
333 | # - remaining bytes: the metadata, each (key, value) pair after the other. |
|
333 | # - remaining bytes: the metadata, each (key, value) pair after the other. | |
334 | _fm1version = 1 |
|
334 | _fm1version = 1 | |
335 | _fm1fixed = b'>IdhHBBB' |
|
335 | _fm1fixed = b'>IdhHBBB' | |
336 | _fm1nodesha1 = b'20s' |
|
336 | _fm1nodesha1 = b'20s' | |
337 | _fm1nodesha256 = b'32s' |
|
337 | _fm1nodesha256 = b'32s' | |
338 | _fm1nodesha1size = _calcsize(_fm1nodesha1) |
|
338 | _fm1nodesha1size = _calcsize(_fm1nodesha1) | |
339 | _fm1nodesha256size = _calcsize(_fm1nodesha256) |
|
339 | _fm1nodesha256size = _calcsize(_fm1nodesha256) | |
340 | _fm1fsize = _calcsize(_fm1fixed) |
|
340 | _fm1fsize = _calcsize(_fm1fixed) | |
341 | _fm1parentnone = 3 |
|
341 | _fm1parentnone = 3 | |
342 | _fm1metapair = b'BB' |
|
342 | _fm1metapair = b'BB' | |
343 | _fm1metapairsize = _calcsize(_fm1metapair) |
|
343 | _fm1metapairsize = _calcsize(_fm1metapair) | |
344 |
|
344 | |||
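Mirroring the earlier check for version 0: the fixed header of a version-1 marker is 19 bytes; everything beyond that is nodes plus metadata.

    import struct

    # '>IdhHBBB': uint32 total size, float64 seconds, int16 tz offset,
    # uint16 flags, uint8 numsuc, uint8 numpar, uint8 nummeta
    assert struct.calcsize('>IdhHBBB') == 4 + 8 + 2 + 2 + 1 + 1 + 1  # 19 == _fm1fsize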
345 |
|
345 | |||
346 | def _fm1purereadmarkers(data, off, stop): |
|
346 | def _fm1purereadmarkers(data, off, stop): | |
347 | # make some global constants local for performance |
|
347 | # make some global constants local for performance | |
348 | noneflag = _fm1parentnone |
|
348 | noneflag = _fm1parentnone | |
349 | sha2flag = usingsha256 |
|
349 | sha2flag = usingsha256 | |
350 | sha1size = _fm1nodesha1size |
|
350 | sha1size = _fm1nodesha1size | |
351 | sha2size = _fm1nodesha256size |
|
351 | sha2size = _fm1nodesha256size | |
352 | sha1fmt = _fm1nodesha1 |
|
352 | sha1fmt = _fm1nodesha1 | |
353 | sha2fmt = _fm1nodesha256 |
|
353 | sha2fmt = _fm1nodesha256 | |
354 | metasize = _fm1metapairsize |
|
354 | metasize = _fm1metapairsize | |
355 | metafmt = _fm1metapair |
|
355 | metafmt = _fm1metapair | |
356 | fsize = _fm1fsize |
|
356 | fsize = _fm1fsize | |
357 | unpack = _unpack |
|
357 | unpack = _unpack | |
358 |
|
358 | |||
359 | # Loop on markers |
|
359 | # Loop on markers | |
360 | ufixed = struct.Struct(_fm1fixed).unpack |
|
360 | ufixed = struct.Struct(_fm1fixed).unpack | |
361 |
|
361 | |||
362 | while off < stop: |
|
362 | while off < stop: | |
363 | # read fixed part |
|
363 | # read fixed part | |
364 | o1 = off + fsize |
|
364 | o1 = off + fsize | |
365 | t, secs, tz, flags, numsuc, numpar, nummeta = ufixed(data[off:o1]) |
|
365 | t, secs, tz, flags, numsuc, numpar, nummeta = ufixed(data[off:o1]) | |
366 |
|
366 | |||
367 | if flags & sha2flag: |
|
367 | if flags & sha2flag: | |
368 | nodefmt = sha2fmt |
|
368 | nodefmt = sha2fmt | |
369 | nodesize = sha2size |
|
369 | nodesize = sha2size | |
370 | else: |
|
370 | else: | |
371 | nodefmt = sha1fmt |
|
371 | nodefmt = sha1fmt | |
372 | nodesize = sha1size |
|
372 | nodesize = sha1size | |
373 |
|
373 | |||
374 | (prec,) = unpack(nodefmt, data[o1 : o1 + nodesize]) |
|
374 | (prec,) = unpack(nodefmt, data[o1 : o1 + nodesize]) | |
375 | o1 += nodesize |
|
375 | o1 += nodesize | |
376 |
|
376 | |||
377 | # read 0 or more successors |
|
377 | # read 0 or more successors | |
378 | if numsuc == 1: |
|
378 | if numsuc == 1: | |
379 | o2 = o1 + nodesize |
|
379 | o2 = o1 + nodesize | |
380 | sucs = (data[o1:o2],) |
|
380 | sucs = (data[o1:o2],) | |
381 | else: |
|
381 | else: | |
382 | o2 = o1 + nodesize * numsuc |
|
382 | o2 = o1 + nodesize * numsuc | |
383 | sucs = unpack(nodefmt * numsuc, data[o1:o2]) |
|
383 | sucs = unpack(nodefmt * numsuc, data[o1:o2]) | |
384 |
|
384 | |||
385 | # read parents |
|
385 | # read parents | |
386 | if numpar == noneflag: |
|
386 | if numpar == noneflag: | |
387 | o3 = o2 |
|
387 | o3 = o2 | |
388 | parents = None |
|
388 | parents = None | |
389 | elif numpar == 1: |
|
389 | elif numpar == 1: | |
390 | o3 = o2 + nodesize |
|
390 | o3 = o2 + nodesize | |
391 | parents = (data[o2:o3],) |
|
391 | parents = (data[o2:o3],) | |
392 | else: |
|
392 | else: | |
393 | o3 = o2 + nodesize * numpar |
|
393 | o3 = o2 + nodesize * numpar | |
394 | parents = unpack(nodefmt * numpar, data[o2:o3]) |
|
394 | parents = unpack(nodefmt * numpar, data[o2:o3]) | |
395 |
|
395 | |||
396 | # read metadata |
|
396 | # read metadata | |
397 | off = o3 + metasize * nummeta |
|
397 | off = o3 + metasize * nummeta | |
398 | metapairsize = unpack(b'>' + (metafmt * nummeta), data[o3:off]) |
|
398 | metapairsize = unpack(b'>' + (metafmt * nummeta), data[o3:off]) | |
399 | metadata = [] |
|
399 | metadata = [] | |
400 | for idx in range(0, len(metapairsize), 2): |
|
400 | for idx in range(0, len(metapairsize), 2): | |
401 | o1 = off + metapairsize[idx] |
|
401 | o1 = off + metapairsize[idx] | |
402 | o2 = o1 + metapairsize[idx + 1] |
|
402 | o2 = o1 + metapairsize[idx + 1] | |
403 | metadata.append((data[off:o1], data[o1:o2])) |
|
403 | metadata.append((data[off:o1], data[o1:o2])) | |
404 | off = o2 |
|
404 | off = o2 | |
405 |
|
405 | |||
406 | yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents) |
|
406 | yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents) | |
407 |
|
407 | |||
408 |
|
408 | |||
409 | def _fm1encodeonemarker(marker): |
|
409 | def _fm1encodeonemarker(marker): | |
410 | pre, sucs, flags, metadata, date, parents = marker |
|
410 | pre, sucs, flags, metadata, date, parents = marker | |
411 | # determine node size |
|
411 | # determine node size | |
412 | _fm1node = _fm1nodesha1 |
|
412 | _fm1node = _fm1nodesha1 | |
413 | if flags & usingsha256: |
|
413 | if flags & usingsha256: | |
414 | _fm1node = _fm1nodesha256 |
|
414 | _fm1node = _fm1nodesha256 | |
415 | numsuc = len(sucs) |
|
415 | numsuc = len(sucs) | |
416 | numextranodes = 1 + numsuc |
|
416 | numextranodes = 1 + numsuc | |
417 | if parents is None: |
|
417 | if parents is None: | |
418 | numpar = _fm1parentnone |
|
418 | numpar = _fm1parentnone | |
419 | else: |
|
419 | else: | |
420 | numpar = len(parents) |
|
420 | numpar = len(parents) | |
421 | numextranodes += numpar |
|
421 | numextranodes += numpar | |
422 | formatnodes = _fm1node * numextranodes |
|
422 | formatnodes = _fm1node * numextranodes | |
423 | formatmeta = _fm1metapair * len(metadata) |
|
423 | formatmeta = _fm1metapair * len(metadata) | |
424 | format = _fm1fixed + formatnodes + formatmeta |
|
424 | format = _fm1fixed + formatnodes + formatmeta | |
425 | # tz is stored in minutes so we divide by 60 |
|
425 | # tz is stored in minutes so we divide by 60 | |
426 | tz = date[1] // 60 |
|
426 | tz = date[1] // 60 | |
427 | data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre] |
|
427 | data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre] | |
428 | data.extend(sucs) |
|
428 | data.extend(sucs) | |
429 | if parents is not None: |
|
429 | if parents is not None: | |
430 | data.extend(parents) |
|
430 | data.extend(parents) | |
431 | totalsize = _calcsize(format) |
|
431 | totalsize = _calcsize(format) | |
432 | for key, value in metadata: |
|
432 | for key, value in metadata: | |
433 | lk = len(key) |
|
433 | lk = len(key) | |
434 | lv = len(value) |
|
434 | lv = len(value) | |
435 | if lk > 255: |
|
435 | if lk > 255: | |
436 | msg = ( |
|
436 | msg = ( | |
437 | b'obsstore metadata key cannot be longer than 255 bytes' |
|
437 | b'obsstore metadata key cannot be longer than 255 bytes' | |
438 | b' (key "%s" is %u bytes)' |
|
438 | b' (key "%s" is %u bytes)' | |
439 | ) % (key, lk) |
|
439 | ) % (key, lk) | |
440 | raise error.ProgrammingError(msg) |
|
440 | raise error.ProgrammingError(msg) | |
441 | if lv > 255: |
|
441 | if lv > 255: | |
442 | msg = ( |
|
442 | msg = ( | |
443 | b'obsstore metadata value cannot be longer than 255 bytes' |
|
443 | b'obsstore metadata value cannot be longer than 255 bytes' | |
444 | b' (value "%s" for key "%s" is %u bytes)' |
|
444 | b' (value "%s" for key "%s" is %u bytes)' | |
445 | ) % (value, key, lv) |
|
445 | ) % (value, key, lv) | |
446 | raise error.ProgrammingError(msg) |
|
446 | raise error.ProgrammingError(msg) | |
447 | data.append(lk) |
|
447 | data.append(lk) | |
448 | data.append(lv) |
|
448 | data.append(lv) | |
449 | totalsize += lk + lv |
|
449 | totalsize += lk + lv | |
450 | data[0] = totalsize |
|
450 | data[0] = totalsize | |
451 | data = [_pack(format, *data)] |
|
451 | data = [_pack(format, *data)] | |
452 | for key, value in metadata: |
|
452 | for key, value in metadata: | |
453 | data.append(key) |
|
453 | data.append(key) | |
454 | data.append(value) |
|
454 | data.append(value) | |
455 | return b''.join(data) |
|
455 | return b''.join(data) | |
456 |
|
456 | |||
457 |
|
457 | |||
458 | def _fm1readmarkers(data, off, stop): |
|
458 | def _fm1readmarkers(data, off, stop): | |
459 | native = getattr(parsers, 'fm1readmarkers', None) |
|
459 | native = getattr(parsers, 'fm1readmarkers', None) | |
460 | if not native: |
|
460 | if not native: | |
461 | return _fm1purereadmarkers(data, off, stop) |
|
461 | return _fm1purereadmarkers(data, off, stop) | |
462 | return native(data, off, stop) |
|
462 | return native(data, off, stop) | |
463 |
|
463 | |||
464 |
|
464 | |||
465 | # mapping to read/write various marker formats |
|
465 | # mapping to read/write various marker formats | |
466 | # <version> -> (decoder, encoder) |
|
466 | # <version> -> (decoder, encoder) | |
467 | formats = { |
|
467 | formats = { | |
468 | _fm0version: (_fm0readmarkers, _fm0encodeonemarker), |
|
468 | _fm0version: (_fm0readmarkers, _fm0encodeonemarker), | |
469 | _fm1version: (_fm1readmarkers, _fm1encodeonemarker), |
|
469 | _fm1version: (_fm1readmarkers, _fm1encodeonemarker), | |
470 | } |
|
470 | } | |
471 |
|
471 | |||
472 |
|
472 | |||
473 | def _readmarkerversion(data): |
|
473 | def _readmarkerversion(data): | |
474 | return _unpack(b'>B', data[0:1])[0] |
|
474 | return _unpack(b'>B', data[0:1])[0] | |
475 |
|
475 | |||
476 |
|
476 | |||
477 | @util.nogc |
|
477 | @util.nogc | |
478 | def _readmarkers(data, off=None, stop=None): |
|
478 | def _readmarkers(data, off=None, stop=None): | |
479 | """Read and enumerate markers from raw data""" |
|
479 | """Read and enumerate markers from raw data""" | |
480 | diskversion = _readmarkerversion(data) |
|
480 | diskversion = _readmarkerversion(data) | |
481 | if not off: |
|
481 | if not off: | |
482 | off = 1 # skip 1 byte version number |
|
482 | off = 1 # skip 1 byte version number | |
483 | if stop is None: |
|
483 | if stop is None: | |
484 | stop = len(data) |
|
484 | stop = len(data) | |
485 | if diskversion not in formats: |
|
485 | if diskversion not in formats: | |
486 | msg = _(b'parsing obsolete marker: unknown version %r') % diskversion |
|
486 | msg = _(b'parsing obsolete marker: unknown version %r') % diskversion | |
487 | raise error.UnknownVersion(msg, version=diskversion) |
|
487 | raise error.UnknownVersion(msg, version=diskversion) | |
488 | return diskversion, formats[diskversion][0](data, off, stop) |
|
488 | return diskversion, formats[diskversion][0](data, off, stop) | |
489 |
|
489 | |||
490 |
|
490 | |||
491 | def encodeheader(version=_fm0version): |
|
491 | def encodeheader(version=_fm0version): | |
492 | return _pack(b'>B', version) |
|
492 | return _pack(b'>B', version) | |
493 |
|
493 | |||
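Since the header is a single byte, version sniffing is trivial, and an empty store decodes to no markers at all:

    blob = encodeheader(_fm1version)        # b'\x01': header only, no markers
    assert _readmarkerversion(blob) == _fm1version

    version, markers = _readmarkers(blob)
    assert version == _fm1version and list(markers) == []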
494 |
|
494 | |||
495 | def encodemarkers(markers, addheader=False, version=_fm0version): |
|
495 | def encodemarkers(markers, addheader=False, version=_fm0version): | |
496 | # Kept separate from flushmarkers(), it will be reused for |
|
496 | # Kept separate from flushmarkers(), it will be reused for | |
497 | # markers exchange. |
|
497 | # markers exchange. | |
498 | encodeone = formats[version][1] |
|
498 | encodeone = formats[version][1] | |
499 | if addheader: |
|
499 | if addheader: | |
500 | yield encodeheader(version) |
|
500 | yield encodeheader(version) | |
501 | for marker in markers: |
|
501 | for marker in markers: | |
502 | yield encodeone(marker) |
|
502 | yield encodeone(marker) | |
503 |
|
503 | |||
504 |
|
504 | |||
505 | @util.nogc |
|
505 | @util.nogc | |
506 | def _addsuccessors(successors, markers): |
|
506 | def _addsuccessors(successors, markers): | |
507 | for mark in markers: |
|
507 | for mark in markers: | |
508 | successors.setdefault(mark[0], set()).add(mark) |
|
508 | successors.setdefault(mark[0], set()).add(mark) | |
509 |
|
509 | |||
510 |
|
510 | |||
511 | @util.nogc |
|
511 | @util.nogc | |
512 | def _addpredecessors(predecessors, markers): |
|
512 | def _addpredecessors(predecessors, markers): | |
513 | for mark in markers: |
|
513 | for mark in markers: | |
514 | for suc in mark[1]: |
|
514 | for suc in mark[1]: | |
515 | predecessors.setdefault(suc, set()).add(mark) |
|
515 | predecessors.setdefault(suc, set()).add(mark) | |
516 |
|
516 | |||
517 |
|
517 | |||
518 | @util.nogc |
|
518 | @util.nogc | |
519 | def _addchildren(children, markers): |
|
519 | def _addchildren(children, markers): | |
520 | for mark in markers: |
|
520 | for mark in markers: | |
521 | parents = mark[5] |
|
521 | parents = mark[5] | |
522 | if parents is not None: |
|
522 | if parents is not None: | |
523 | for p in parents: |
|
523 | for p in parents: | |
524 | children.setdefault(p, set()).add(mark) |
|
524 | children.setdefault(p, set()).add(mark) | |
525 |
|
525 | |||
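The three helpers above build inverted indexes over one flat marker list; for a single A-to-B rewrite they populate as follows:

    A, B = b'\x0a' * 20, b'\x0b' * 20
    markers = [(A, (B,), 0, (), (0.0, 0), None)]

    successors, predecessors, children = {}, {}, {}
    _addsuccessors(successors, markers)
    _addpredecessors(predecessors, markers)
    _addchildren(children, markers)

    assert set(successors) == {A}     # keyed by predecessor node
    assert set(predecessors) == {B}   # keyed by successor node
    assert children == {}             # parents is None: nothing recorded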
526 |
|
526 | |||
527 | def _checkinvalidmarkers(repo, markers): |
|
527 | def _checkinvalidmarkers(repo, markers): | |
528 | """search for marker with invalid data and raise error if needed |
|
528 | """search for marker with invalid data and raise error if needed | |
529 |
|
529 | |||
530 | Exists as a separate function so the evolve extension can do more |
|
530 | Exists as a separate function so the evolve extension can do more | |
531 | subtle handling. |
|
531 | subtle handling. | |
532 | """ |
|
532 | """ | |
533 | for mark in markers: |
|
533 | for mark in markers: | |
534 | if repo.nullid in mark[1]: |
|
534 | if repo.nullid in mark[1]: | |
535 | raise error.Abort( |
|
535 | raise error.Abort( | |
536 | _( |
|
536 | _( | |
537 | b'bad obsolescence marker detected: ' |
|
537 | b'bad obsolescence marker detected: ' | |
538 | b'invalid successors nullid' |
|
538 | b'invalid successors nullid' | |
539 | ) |
|
539 | ) | |
540 | ) |
|
540 | ) | |
541 |
|
541 | |||
542 |
|
542 | |||
543 | class obsstore: |
|
543 | class obsstore: | |
544 | """Store obsolete markers |
|
544 | """Store obsolete markers | |
545 |
|
545 | |||
546 | Markers can be accessed with three mappings: |
|
546 | Markers can be accessed with three mappings: | |
547 | - predecessors[x] -> set(markers on predecessors edges of x) |
|
547 | - predecessors[x] -> set(markers on predecessors edges of x) | |
548 | - successors[x] -> set(markers on successors edges of x) |
|
548 | - successors[x] -> set(markers on successors edges of x) | |
549 | - children[x] -> set(markers on predecessors edges of children(x)) |
|
549 | - children[x] -> set(markers on predecessors edges of children(x)) | |
550 | """ |
|
550 | """ | |
551 |
|
551 | |||
552 | fields = (b'prec', b'succs', b'flag', b'meta', b'date', b'parents') |
|
552 | fields = (b'prec', b'succs', b'flag', b'meta', b'date', b'parents') | |
553 | # prec: nodeid, predecessors changesets |
|
553 | # prec: nodeid, predecessors changesets | |
554 | # succs: tuple of nodeid, successor changesets (0-N length) |
|
554 | # succs: tuple of nodeid, successor changesets (0-N length) | |
555 | # flag: integer, flag field carrying modifier for the markers (see doc) |
|
555 | # flag: integer, flag field carrying modifier for the markers (see doc) | |
556 | # meta: binary blob in UTF-8, encoded metadata dictionary |
|
556 | # meta: binary blob in UTF-8, encoded metadata dictionary | |
557 | # date: (float, int) tuple, date of marker creation |
|
557 | # date: (float, int) tuple, date of marker creation | |
558 | # parents: (tuple of nodeid) or None, parents of predecessors |
|
558 | # parents: (tuple of nodeid) or None, parents of predecessors | |
559 | # None is used when no data has been recorded |
|
559 | # None is used when no data has been recorded | |
560 |
|
560 | |||
561 | def __init__(self, repo, svfs, defaultformat=_fm1version, readonly=False): |
|
561 | def __init__(self, repo, svfs, defaultformat=_fm1version, readonly=False): | |
562 | # caches for various obsolescence related cache |
|
562 | # caches for various obsolescence related cache | |
563 | self.caches = {} |
|
563 | self.caches = {} | |
564 | self.svfs = svfs |
|
564 | self.svfs = svfs | |
565 | self.repo = repo |
|
565 | self.repo = repo | |
566 | self._defaultformat = defaultformat |
|
566 | self._defaultformat = defaultformat | |
567 | self._readonly = readonly |
|
567 | self._readonly = readonly | |
568 |
|
568 | |||
569 | def __iter__(self): |
|
569 | def __iter__(self): | |
570 | return iter(self._all) |
|
570 | return iter(self._all) | |
571 |
|
571 | |||
572 | def __len__(self): |
|
572 | def __len__(self): | |
573 | return len(self._all) |
|
573 | return len(self._all) | |
574 |
|
574 | |||
575 | def __nonzero__(self): |
|
575 | def __nonzero__(self): | |
576 | from . import statichttprepo |
|
576 | from . import statichttprepo | |
577 |
|
577 | |||
578 | if isinstance(self.repo, statichttprepo.statichttprepository): |
|
578 | if isinstance(self.repo, statichttprepo.statichttprepository): | |
579 | # If repo is accessed via static HTTP, then we can't use os.stat() |
|
579 | # If repo is accessed via static HTTP, then we can't use os.stat() | |
580 | # to just peek at the file size. |
|
580 | # to just peek at the file size. | |
581 | return len(self._data) > 1 |
|
581 | return len(self._data) > 1 | |
582 | if not self._cached('_all'): |
|
582 | if not self._cached('_all'): | |
583 | try: |
|
583 | try: | |
584 | return self.svfs.stat(b'obsstore').st_size > 1 |
|
584 | return self.svfs.stat(b'obsstore').st_size > 1 | |
585 | except OSError as inst: |
|
585 | except OSError as inst: | |
586 | if inst.errno != errno.ENOENT: |
|
586 | if inst.errno != errno.ENOENT: | |
587 | raise |
|
587 | raise | |
588 | # just build an empty _all list if no obsstore exists, which |
|
588 | # just build an empty _all list if no obsstore exists, which | |
589 | # avoids further stat() syscalls |
|
589 | # avoids further stat() syscalls | |
590 | return bool(self._all) |
|
590 | return bool(self._all) | |
591 |
|
591 | |||
592 | __bool__ = __nonzero__ |
|
592 | __bool__ = __nonzero__ | |
593 |
|
593 | |||
594 | @property |
|
594 | @property | |
595 | def readonly(self): |
|
595 | def readonly(self): | |
596 | """True if marker creation is disabled |
|
596 | """True if marker creation is disabled | |
597 |
|
597 | |||
598 | Remove me in the future when obsolete markers are always on.""" |
|
598 | Remove me in the future when obsolete markers are always on.""" | |
599 | return self._readonly |
|
599 | return self._readonly | |
600 |
|
600 | |||
601 | def create( |
|
601 | def create( | |
602 | self, |
|
602 | self, | |
603 | transaction, |
|
603 | transaction, | |
604 | prec, |
|
604 | prec, | |
605 | succs=(), |
|
605 | succs=(), | |
606 | flag=0, |
|
606 | flag=0, | |
607 | parents=None, |
|
607 | parents=None, | |
608 | date=None, |
|
608 | date=None, | |
609 | metadata=None, |
|
609 | metadata=None, | |
610 | ui=None, |
|
610 | ui=None, | |
611 | ): |
|
611 | ): | |
612 | """obsolete: add a new obsolete marker |
|
612 | """obsolete: add a new obsolete marker | |
613 |
|
613 | |||
614 | * ensure it is hashable |
|
614 | * ensure it is hashable | |
615 | * check mandatory metadata |
|
615 | * check mandatory metadata | |
616 | * encode metadata |
|
616 | * encode metadata | |
617 |
|
617 | |||
618 | If you are a human writing code that creates markers, you want to use the |
|
618 | If you are a human writing code that creates markers, you want to use the | |
619 | `createmarkers` function in this module instead. |
|
619 | `createmarkers` function in this module instead. | |
620 |
|
620 | |||
621 | Return True if a new marker has been added, False if the marker |
|
621 | Return True if a new marker has been added, False if the marker | |
622 | already existed (no-op). |
|
622 | already existed (no-op). | |
623 | """ |
|
623 | """ | |
624 | flag = int(flag) |
|
624 | flag = int(flag) | |
625 | if metadata is None: |
|
625 | if metadata is None: | |
626 | metadata = {} |
|
626 | metadata = {} | |
627 | if date is None: |
|
627 | if date is None: | |
628 | if b'date' in metadata: |
|
628 | if b'date' in metadata: | |
629 | # as a courtesy for out-of-tree extensions |
|
629 | # as a courtesy for out-of-tree extensions | |
630 | date = dateutil.parsedate(metadata.pop(b'date')) |
|
630 | date = dateutil.parsedate(metadata.pop(b'date')) | |
631 | elif ui is not None: |
|
631 | elif ui is not None: | |
632 | date = ui.configdate(b'devel', b'default-date') |
|
632 | date = ui.configdate(b'devel', b'default-date') | |
633 | if date is None: |
|
633 | if date is None: | |
634 | date = dateutil.makedate() |
|
634 | date = dateutil.makedate() | |
635 | else: |
|
635 | else: | |
636 | date = dateutil.makedate() |
|
636 | date = dateutil.makedate() | |
637 | if flag & usingsha256: |
|
637 | if flag & usingsha256: | |
638 | if len(prec) != 32: |
|
638 | if len(prec) != 32: | |
639 | raise ValueError(prec) |
|
639 | raise ValueError(prec) | |
640 | for succ in succs: |
|
640 | for succ in succs: | |
641 | if len(succ) != 32: |
|
641 | if len(succ) != 32: | |
642 | raise ValueError(succ) |
|
642 | raise ValueError(succ) | |
643 | else: |
|
643 | else: | |
644 | if len(prec) != 20: |
|
644 | if len(prec) != 20: | |
645 | raise ValueError(prec) |
|
645 | raise ValueError(prec) | |
646 | for succ in succs: |
|
646 | for succ in succs: | |
647 | if len(succ) != 20: |
|
647 | if len(succ) != 20: | |
648 | raise ValueError(succ) |
|
648 | raise ValueError(succ) | |
649 | if prec in succs: |
|
649 | if prec in succs: | |
650 | raise ValueError( |
|
650 | raise ValueError('in-marker cycle with %s' % prec.hex()) | |
651 | 'in-marker cycle with %s' % pycompat.sysstr(hex(prec)) |
|
|||
652 | ) |
|
|||
653 |
|
651 | |||
654 | metadata = tuple(sorted(metadata.items())) |
|
652 | metadata = tuple(sorted(metadata.items())) | |
655 | for k, v in metadata: |
|
653 | for k, v in metadata: | |
656 | try: |
|
654 | try: | |
657 | # might be better to reject non-ASCII keys |
|
655 | # might be better to reject non-ASCII keys | |
658 | k.decode('utf-8') |
|
656 | k.decode('utf-8') | |
659 | v.decode('utf-8') |
|
657 | v.decode('utf-8') | |
660 | except UnicodeDecodeError: |
|
658 | except UnicodeDecodeError: | |
661 | raise error.ProgrammingError( |
|
659 | raise error.ProgrammingError( | |
662 | b'obsstore metadata must be valid UTF-8 sequence ' |
|
660 | b'obsstore metadata must be valid UTF-8 sequence ' | |
663 | b'(key = %r, value = %r)' |
|
661 | b'(key = %r, value = %r)' | |
664 | % (pycompat.bytestr(k), pycompat.bytestr(v)) |
|
662 | % (pycompat.bytestr(k), pycompat.bytestr(v)) | |
665 | ) |
|
663 | ) | |
666 |
|
664 | |||
667 | marker = (bytes(prec), tuple(succs), flag, metadata, date, parents) |
|
665 | marker = (bytes(prec), tuple(succs), flag, metadata, date, parents) | |
668 | return bool(self.add(transaction, [marker])) |
|
666 | return bool(self.add(transaction, [marker])) | |
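For readers new to the marker tuple assembled above, the following self-contained sketch mirrors the length validation `create` performs before delegating to `add`. It is illustrative only; the concrete value of the `usingsha256` flag is an assumption of this sketch, not taken from the change.

    usingsha256 = 2  # assumed flag value, for illustration only

    def checkmarkernodes(prec, succs, flag=0):
        # SHA-256 markers carry 32-byte binary nodes, SHA-1 markers 20-byte ones
        expected = 32 if flag & usingsha256 else 20
        for node in (prec,) + tuple(succs):
            if len(node) != expected:
                raise ValueError(node)
        if prec in succs:
            raise ValueError('in-marker cycle with %s' % prec.hex())

    checkmarkernodes(b'\x11' * 20, (b'\x22' * 20,))  # passes silently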
669 |
|
667 | |||
670 | def add(self, transaction, markers): |
|
668 | def add(self, transaction, markers): | |
671 | """Add new markers to the store |
|
669 | """Add new markers to the store | |
672 |
|
670 | |||
673 | Take care of filtering duplicates. |
|
671 | Take care of filtering duplicates. | |
674 | Return the number of new markers.""" |
|
672 | Return the number of new markers.""" | |
675 | if self._readonly: |
|
673 | if self._readonly: | |
676 | raise error.Abort( |
|
674 | raise error.Abort( | |
677 | _(b'creating obsolete markers is not enabled on this repo') |
|
675 | _(b'creating obsolete markers is not enabled on this repo') | |
678 | ) |
|
676 | ) | |
679 | known = set() |
|
677 | known = set() | |
680 | getsuccessors = self.successors.get |
|
678 | getsuccessors = self.successors.get | |
681 | new = [] |
|
679 | new = [] | |
682 | for m in markers: |
|
680 | for m in markers: | |
683 | if m not in getsuccessors(m[0], ()) and m not in known: |
|
681 | if m not in getsuccessors(m[0], ()) and m not in known: | |
684 | known.add(m) |
|
682 | known.add(m) | |
685 | new.append(m) |
|
683 | new.append(m) | |
686 | if new: |
|
684 | if new: | |
687 | f = self.svfs(b'obsstore', b'ab') |
|
685 | f = self.svfs(b'obsstore', b'ab') | |
688 | try: |
|
686 | try: | |
689 | offset = f.tell() |
|
687 | offset = f.tell() | |
690 | transaction.add(b'obsstore', offset) |
|
688 | transaction.add(b'obsstore', offset) | |
691 | # offset == 0: new file - add the version header |
|
689 | # offset == 0: new file - add the version header | |
692 | data = b''.join(encodemarkers(new, offset == 0, self._version)) |
|
690 | data = b''.join(encodemarkers(new, offset == 0, self._version)) | |
693 | f.write(data) |
|
691 | f.write(data) | |
694 | finally: |
|
692 | finally: | |
695 | # XXX: f.close() == filecache invalidation == obsstore rebuilt. |
|
693 | # XXX: f.close() == filecache invalidation == obsstore rebuilt. | |
696 | # call 'filecacheentry.refresh()' here |
|
694 | # call 'filecacheentry.refresh()' here | |
697 | f.close() |
|
695 | f.close() | |
698 | addedmarkers = transaction.changes.get(b'obsmarkers') |
|
696 | addedmarkers = transaction.changes.get(b'obsmarkers') | |
699 | if addedmarkers is not None: |
|
697 | if addedmarkers is not None: | |
700 | addedmarkers.update(new) |
|
698 | addedmarkers.update(new) | |
701 | self._addmarkers(new, data) |
|
699 | self._addmarkers(new, data) | |
702 | # new markers *may* have changed several sets. Invalidate the caches. |
|
700 | # new markers *may* have changed several sets. Invalidate the caches. | |
703 | self.caches.clear() |
|
701 | self.caches.clear() | |
704 | # records the number of new markers for the transaction hooks |
|
702 | # records the number of new markers for the transaction hooks | |
705 | previous = int(transaction.hookargs.get(b'new_obsmarkers', b'0')) |
|
703 | previous = int(transaction.hookargs.get(b'new_obsmarkers', b'0')) | |
706 | transaction.hookargs[b'new_obsmarkers'] = b'%d' % (previous + len(new)) |
|
704 | transaction.hookargs[b'new_obsmarkers'] = b'%d' % (previous + len(new)) | |
707 | return len(new) |
|
705 | return len(new) | |
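The duplicate filtering above has to handle repeats within one batch as well as against the store. Here is a standalone sketch of the same logic over plain dicts and tuples; all names and data are invented for illustration.

    def filternew(markers, successors):
        # keep a marker only if the store does not already know it for its
        # precursor and it has not appeared earlier in this batch
        known = set()
        new = []
        for m in markers:
            if m not in successors.get(m[0], ()) and m not in known:
                known.add(m)
                new.append(m)
        return new

    existing = {b'a': {(b'a', (b'b',))}}
    batch = [(b'a', (b'b',)), (b'a', (b'c',)), (b'a', (b'c',))]
    assert filternew(batch, existing) == [(b'a', (b'c',))]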
708 |
|
706 | |||
709 | def mergemarkers(self, transaction, data): |
|
707 | def mergemarkers(self, transaction, data): | |
710 | """merge a binary stream of markers inside the obsstore |
|
708 | """merge a binary stream of markers inside the obsstore | |
711 |
|
709 | |||
712 | Returns the number of new markers added.""" |
|
710 | Returns the number of new markers added.""" | |
713 | version, markers = _readmarkers(data) |
|
711 | version, markers = _readmarkers(data) | |
714 | return self.add(transaction, markers) |
|
712 | return self.add(transaction, markers) | |
715 |
|
713 | |||
716 | @propertycache |
|
714 | @propertycache | |
717 | def _data(self): |
|
715 | def _data(self): | |
718 | return self.svfs.tryread(b'obsstore') |
|
716 | return self.svfs.tryread(b'obsstore') | |
719 |
|
717 | |||
720 | @propertycache |
|
718 | @propertycache | |
721 | def _version(self): |
|
719 | def _version(self): | |
722 | if len(self._data) >= 1: |
|
720 | if len(self._data) >= 1: | |
723 | return _readmarkerversion(self._data) |
|
721 | return _readmarkerversion(self._data) | |
724 | else: |
|
722 | else: | |
725 | return self._defaultformat |
|
723 | return self._defaultformat | |
726 |
|
724 | |||
727 | @propertycache |
|
725 | @propertycache | |
728 | def _all(self): |
|
726 | def _all(self): | |
729 | data = self._data |
|
727 | data = self._data | |
730 | if not data: |
|
728 | if not data: | |
731 | return [] |
|
729 | return [] | |
732 | self._version, markers = _readmarkers(data) |
|
730 | self._version, markers = _readmarkers(data) | |
733 | markers = list(markers) |
|
731 | markers = list(markers) | |
734 | _checkinvalidmarkers(self.repo, markers) |
|
732 | _checkinvalidmarkers(self.repo, markers) | |
735 | return markers |
|
733 | return markers | |
736 |
|
734 | |||
737 | @propertycache |
|
735 | @propertycache | |
738 | def successors(self): |
|
736 | def successors(self): | |
739 | successors = {} |
|
737 | successors = {} | |
740 | _addsuccessors(successors, self._all) |
|
738 | _addsuccessors(successors, self._all) | |
741 | return successors |
|
739 | return successors | |
742 |
|
740 | |||
743 | @propertycache |
|
741 | @propertycache | |
744 | def predecessors(self): |
|
742 | def predecessors(self): | |
745 | predecessors = {} |
|
743 | predecessors = {} | |
746 | _addpredecessors(predecessors, self._all) |
|
744 | _addpredecessors(predecessors, self._all) | |
747 | return predecessors |
|
745 | return predecessors | |
748 |
|
746 | |||
749 | @propertycache |
|
747 | @propertycache | |
750 | def children(self): |
|
748 | def children(self): | |
751 | children = {} |
|
749 | children = {} | |
752 | _addchildren(children, self._all) |
|
750 | _addchildren(children, self._all) | |
753 | return children |
|
751 | return children | |
754 |
|
752 | |||
755 | def _cached(self, attr): |
|
753 | def _cached(self, attr): | |
756 | return attr in self.__dict__ |
|
754 | return attr in self.__dict__ | |
757 |
|
755 | |||
758 | def _addmarkers(self, markers, rawdata): |
|
756 | def _addmarkers(self, markers, rawdata): | |
759 | markers = list(markers) # to allow repeated iteration |
|
757 | markers = list(markers) # to allow repeated iteration | |
760 | self._data = self._data + rawdata |
|
758 | self._data = self._data + rawdata | |
761 | self._all.extend(markers) |
|
759 | self._all.extend(markers) | |
762 | if self._cached('successors'): |
|
760 | if self._cached('successors'): | |
763 | _addsuccessors(self.successors, markers) |
|
761 | _addsuccessors(self.successors, markers) | |
764 | if self._cached('predecessors'): |
|
762 | if self._cached('predecessors'): | |
765 | _addpredecessors(self.predecessors, markers) |
|
763 | _addpredecessors(self.predecessors, markers) | |
766 | if self._cached('children'): |
|
764 | if self._cached('children'): | |
767 | _addchildren(self.children, markers) |
|
765 | _addchildren(self.children, markers) | |
768 | _checkinvalidmarkers(self.repo, markers) |
|
766 | _checkinvalidmarkers(self.repo, markers) | |
769 |
|
767 | |||
770 | def relevantmarkers(self, nodes): |
|
768 | def relevantmarkers(self, nodes): | |
771 | """return a set of all obsolescence markers relevant to a set of nodes. |
|
769 | """return a set of all obsolescence markers relevant to a set of nodes. | |
772 |
|
770 | |||
773 | "relevant" to a set of nodes mean: |
|
771 | "relevant" to a set of nodes mean: | |
774 |
|
772 | |||
775 | - marker that use this changeset as successor |
|
773 | - marker that use this changeset as successor | |
776 | - prune marker of direct children on this changeset |
|
774 | - prune marker of direct children on this changeset | |
777 | - recursive application of the two rules on predecessors of these |
|
775 | - recursive application of the two rules on predecessors of these | |
778 | markers |
|
776 | markers | |
779 |
|
777 | |||
780 | It is a set, so you cannot rely on order.""" |
|
778 | It is a set, so you cannot rely on order.""" | |
781 |
|
779 | |||
782 | pendingnodes = set(nodes) |
|
780 | pendingnodes = set(nodes) | |
783 | seenmarkers = set() |
|
781 | seenmarkers = set() | |
784 | seennodes = set(pendingnodes) |
|
782 | seennodes = set(pendingnodes) | |
785 | precursorsmarkers = self.predecessors |
|
783 | precursorsmarkers = self.predecessors | |
786 | succsmarkers = self.successors |
|
784 | succsmarkers = self.successors | |
787 | children = self.children |
|
785 | children = self.children | |
788 | while pendingnodes: |
|
786 | while pendingnodes: | |
789 | direct = set() |
|
787 | direct = set() | |
790 | for current in pendingnodes: |
|
788 | for current in pendingnodes: | |
791 | direct.update(precursorsmarkers.get(current, ())) |
|
789 | direct.update(precursorsmarkers.get(current, ())) | |
792 | pruned = [m for m in children.get(current, ()) if not m[1]] |
|
790 | pruned = [m for m in children.get(current, ()) if not m[1]] | |
793 | direct.update(pruned) |
|
791 | direct.update(pruned) | |
794 | pruned = [m for m in succsmarkers.get(current, ()) if not m[1]] |
|
792 | pruned = [m for m in succsmarkers.get(current, ()) if not m[1]] | |
795 | direct.update(pruned) |
|
793 | direct.update(pruned) | |
796 | direct -= seenmarkers |
|
794 | direct -= seenmarkers | |
797 | pendingnodes = {m[0] for m in direct} |
|
795 | pendingnodes = {m[0] for m in direct} | |
798 | seenmarkers |= direct |
|
796 | seenmarkers |= direct | |
799 | pendingnodes -= seennodes |
|
797 | pendingnodes -= seennodes | |
800 | seennodes |= pendingnodes |
|
798 | seennodes |= pendingnodes | |
801 | return seenmarkers |
|
799 | return seenmarkers | |
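To make the fixpoint walk above concrete, here is a reduced standalone sketch: it follows only predecessor markers (the prune-marker rules are omitted for brevity) and shows how asking about one node transitively pulls in the whole rewrite chain. Data and names are illustrative.

    def relevant(nodes, predecessors):
        pending, seenmarkers, seennodes = set(nodes), set(), set(nodes)
        while pending:
            direct = set()
            for current in pending:
                direct.update(predecessors.get(current, ()))
            direct -= seenmarkers
            seenmarkers |= direct
            # continue the walk from the precursors these markers name
            pending = {m[0] for m in direct} - seennodes
            seennodes |= pending
        return seenmarkers

    # b was rewritten into c, and a into b: asking about c finds both markers
    preds = {b'c': {(b'b', (b'c',))}, b'b': {(b'a', (b'b',))}}
    assert relevant({b'c'}, preds) == {(b'b', (b'c',)), (b'a', (b'b',))}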
802 |
|
800 | |||
803 |
|
801 | |||
804 | def makestore(ui, repo): |
|
802 | def makestore(ui, repo): | |
805 | """Create an obsstore instance from a repo.""" |
|
803 | """Create an obsstore instance from a repo.""" | |
806 | # read default format for new obsstore. |
|
804 | # read default format for new obsstore. | |
807 | # developer config: format.obsstore-version |
|
805 | # developer config: format.obsstore-version | |
808 | defaultformat = ui.configint(b'format', b'obsstore-version') |
|
806 | defaultformat = ui.configint(b'format', b'obsstore-version') | |
809 | # rely on obsstore class default when possible. |
|
807 | # rely on obsstore class default when possible. | |
810 | kwargs = {} |
|
808 | kwargs = {} | |
811 | if defaultformat is not None: |
|
809 | if defaultformat is not None: | |
812 | kwargs['defaultformat'] = defaultformat |
|
810 | kwargs['defaultformat'] = defaultformat | |
813 | readonly = not isenabled(repo, createmarkersopt) |
|
811 | readonly = not isenabled(repo, createmarkersopt) | |
814 | store = obsstore(repo, repo.svfs, readonly=readonly, **kwargs) |
|
812 | store = obsstore(repo, repo.svfs, readonly=readonly, **kwargs) | |
815 | if store and readonly: |
|
813 | if store and readonly: | |
816 | ui.warn( |
|
814 | ui.warn( | |
817 | _(b'obsolete feature not enabled but %i markers found!\n') |
|
815 | _(b'obsolete feature not enabled but %i markers found!\n') | |
818 | % len(list(store)) |
|
816 | % len(list(store)) | |
819 | ) |
|
817 | ) | |
820 | return store |
|
818 | return store | |
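As a usage note, the knob read above is a regular hgrc setting; a sketch of what selecting an explicit on-disk format could look like (the value shown is illustrative, any format known to this module would do):

    [format]
    obsstore-version = 1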
821 |
|
819 | |||
822 |
|
820 | |||
823 | def commonversion(versions): |
|
821 | def commonversion(versions): | |
824 | """Return the newest version listed in both versions and our local formats. |
|
822 | """Return the newest version listed in both versions and our local formats. | |
825 |
|
823 | |||
826 | Returns None if no common version exists. |
|
824 | Returns None if no common version exists. | |
827 | """ |
|
825 | """ | |
828 | versions.sort(reverse=True) |
|
826 | versions.sort(reverse=True) | |
829 | # search for the highest version known on both sides |
|
827 | # search for the highest version known on both sides | |
830 | for v in versions: |
|
828 | for v in versions: | |
831 | if v in formats: |
|
829 | if v in formats: | |
832 | return v |
|
830 | return v | |
833 | return None |
|
831 | return None | |
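A standalone mirror of the negotiation loop above, with a made-up set of locally supported formats, shows why sorting in reverse picks the newest mutually understood version:

    localformats = {0, 1}  # assumption: the formats this side understands

    def pickcommon(versions):
        versions.sort(reverse=True)  # try the newest remote version first
        for v in versions:
            if v in localformats:
                return v
        return None

    assert pickcommon([2, 1, 0]) == 1  # 2 is unknown locally, 1 wins
    assert pickcommon([2]) is None     # no overlap at all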
834 |
|
832 | |||
835 |
|
833 | |||
836 | # arbitrarily picked to fit into the 8K limit from the HTTP server |
|
834 | # arbitrarily picked to fit into the 8K limit from the HTTP server | |
837 | # you have to take into account: |
|
835 | # you have to take into account: | |
838 | # - the version header |
|
836 | # - the version header | |
839 | # - the base85 encoding |
|
837 | # - the base85 encoding | |
840 | _maxpayload = 5300 |
|
838 | _maxpayload = 5300 | |
841 |
|
839 | |||
842 |
|
840 | |||
843 | def _pushkeyescape(markers): |
|
841 | def _pushkeyescape(markers): | |
844 | """encode markers into a dict suitable for pushkey exchange |
|
842 | """encode markers into a dict suitable for pushkey exchange | |
845 |
|
843 | |||
846 | - binary data is base85 encoded |
|
844 | - binary data is base85 encoded | |
847 | - split in chunks smaller than 5300 bytes""" |
|
845 | - split in chunks smaller than 5300 bytes""" | |
848 | keys = {} |
|
846 | keys = {} | |
849 | parts = [] |
|
847 | parts = [] | |
850 | currentlen = _maxpayload * 2 # ensure we create a new part |
|
848 | currentlen = _maxpayload * 2 # ensure we create a new part | |
851 | for marker in markers: |
|
849 | for marker in markers: | |
852 | nextdata = _fm0encodeonemarker(marker) |
|
850 | nextdata = _fm0encodeonemarker(marker) | |
853 | if len(nextdata) + currentlen > _maxpayload: |
|
851 | if len(nextdata) + currentlen > _maxpayload: | |
854 | currentpart = [] |
|
852 | currentpart = [] | |
855 | currentlen = 0 |
|
853 | currentlen = 0 | |
856 | parts.append(currentpart) |
|
854 | parts.append(currentpart) | |
857 | currentpart.append(nextdata) |
|
855 | currentpart.append(nextdata) | |
858 | currentlen += len(nextdata) |
|
856 | currentlen += len(nextdata) | |
859 | for idx, part in enumerate(reversed(parts)): |
|
857 | for idx, part in enumerate(reversed(parts)): | |
860 | data = b''.join([_pack(b'>B', _fm0version)] + part) |
|
858 | data = b''.join([_pack(b'>B', _fm0version)] + part) | |
861 | keys[b'dump%i' % idx] = util.b85encode(data) |
|
859 | keys[b'dump%i' % idx] = util.b85encode(data) | |
862 | return keys |
|
860 | return keys | |
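The greedy chunking above is the interesting part; this toy reimplementation with a tiny cap (the real cap is 5300 bytes) shows how a fresh part is started whenever the next encoded marker would overflow the current one:

    maxpayload = 10  # tiny cap for the example only

    def chunk(blobs):
        parts, currentlen = [], maxpayload * 2  # oversize to force a first part
        for blob in blobs:
            if len(blob) + currentlen > maxpayload:
                parts.append([])
                currentlen = 0
            parts[-1].append(blob)
            currentlen += len(blob)
        return parts

    assert chunk([b'aaaa', b'bbbb', b'cccc']) == [[b'aaaa', b'bbbb'], [b'cccc']]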
863 |
|
861 | |||
864 |
|
862 | |||
865 | def listmarkers(repo): |
|
863 | def listmarkers(repo): | |
866 | """List markers over pushkey""" |
|
864 | """List markers over pushkey""" | |
867 | if not repo.obsstore: |
|
865 | if not repo.obsstore: | |
868 | return {} |
|
866 | return {} | |
869 | return _pushkeyescape(sorted(repo.obsstore)) |
|
867 | return _pushkeyescape(sorted(repo.obsstore)) | |
870 |
|
868 | |||
871 |
|
869 | |||
872 | def pushmarker(repo, key, old, new): |
|
870 | def pushmarker(repo, key, old, new): | |
873 | """Push markers over pushkey""" |
|
871 | """Push markers over pushkey""" | |
874 | if not key.startswith(b'dump'): |
|
872 | if not key.startswith(b'dump'): | |
875 | repo.ui.warn(_(b'unknown key: %r') % key) |
|
873 | repo.ui.warn(_(b'unknown key: %r') % key) | |
876 | return False |
|
874 | return False | |
877 | if old: |
|
875 | if old: | |
878 | repo.ui.warn(_(b'unexpected old value for %r') % key) |
|
876 | repo.ui.warn(_(b'unexpected old value for %r') % key) | |
879 | return False |
|
877 | return False | |
880 | data = util.b85decode(new) |
|
878 | data = util.b85decode(new) | |
881 | with repo.lock(), repo.transaction(b'pushkey: obsolete markers') as tr: |
|
879 | with repo.lock(), repo.transaction(b'pushkey: obsolete markers') as tr: | |
882 | repo.obsstore.mergemarkers(tr, data) |
|
880 | repo.obsstore.mergemarkers(tr, data) | |
883 | repo.invalidatevolatilesets() |
|
881 | repo.invalidatevolatilesets() | |
884 | return True |
|
882 | return True | |
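Taken together with `listmarkers` above, this supports a simple pushkey round trip; the sketch below assumes `src` and `dst` are two open localrepository objects and is not part of the change:

    # every dumpN key produced by the lister is replayed into the receiver;
    # old must be empty since obsstore pushkey entries are append-only
    for key, value in sorted(listmarkers(src).items()):
        pushmarker(dst, key, b'', value)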
885 |
|
883 | |||
886 |
|
884 | |||
887 | # mapping of 'set-name' -> <function to compute this set> |
|
885 | # mapping of 'set-name' -> <function to compute this set> | |
888 | cachefuncs = {} |
|
886 | cachefuncs = {} | |
889 |
|
887 | |||
890 |
|
888 | |||
891 | def cachefor(name): |
|
889 | def cachefor(name): | |
892 | """Decorator to register a function as computing the cache for a set""" |
|
890 | """Decorator to register a function as computing the cache for a set""" | |
893 |
|
891 | |||
894 | def decorator(func): |
|
892 | def decorator(func): | |
895 | if name in cachefuncs: |
|
893 | if name in cachefuncs: | |
896 | msg = b"duplicated registration for volatileset '%s' (existing: %r)" |
|
894 | msg = b"duplicated registration for volatileset '%s' (existing: %r)" | |
897 | raise error.ProgrammingError(msg % (name, cachefuncs[name])) |
|
895 | raise error.ProgrammingError(msg % (name, cachefuncs[name])) | |
898 | cachefuncs[name] = func |
|
896 | cachefuncs[name] = func | |
899 | return func |
|
897 | return func | |
900 |
|
898 | |||
901 | return decorator |
|
899 | return decorator | |
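A hedged usage sketch of the decorator: the set name below is invented for illustration (real registrations in this file use names such as b'obsolete' and b'orphan'), and registering the same name twice raises ProgrammingError as shown above.

    @cachefor(b'example-set')
    def _computeexampleset(repo):
        """hypothetical volatile set, always empty in this sketch"""
        return frozenset()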
902 |
|
900 | |||
903 |
|
901 | |||
904 | def getrevs(repo, name): |
|
902 | def getrevs(repo, name): | |
905 | """Return the set of revision that belong to the <name> set |
|
903 | """Return the set of revision that belong to the <name> set | |
906 |
|
904 | |||
907 | Such access may compute the set and cache it for future use""" |
|
905 | Such access may compute the set and cache it for future use""" | |
908 | repo = repo.unfiltered() |
|
906 | repo = repo.unfiltered() | |
909 | with util.timedcm('getrevs %s', name): |
|
907 | with util.timedcm('getrevs %s', name): | |
910 | if not repo.obsstore: |
|
908 | if not repo.obsstore: | |
911 | return frozenset() |
|
909 | return frozenset() | |
912 | if name not in repo.obsstore.caches: |
|
910 | if name not in repo.obsstore.caches: | |
913 | repo.obsstore.caches[name] = cachefuncs[name](repo) |
|
911 | repo.obsstore.caches[name] = cachefuncs[name](repo) | |
914 | return repo.obsstore.caches[name] |
|
912 | return repo.obsstore.caches[name] | |
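Caller-side usage is then a matter of naming the set; `repo` below is assumed to be an already-open repository object, and since every set is a frozenset, set algebra between them is cheap:

    obsoleterevs = getrevs(repo, b'obsolete')   # computed once, then cached
    orphanrevs = getrevs(repo, b'orphan')
    troubled = orphanrevs | getrevs(repo, b'phasedivergent')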
915 |
|
913 | |||
916 |
|
914 | |||
917 | # To keep things simple we need to invalidate the obsolescence cache when: |
|
915 | # To keep things simple we need to invalidate the obsolescence cache when: | |
918 | # |
|
916 | # | |
919 | # - a new changeset is added |
|
917 | # - a new changeset is added | |
920 | # - the public phase is changed |
|
918 | # - the public phase is changed | |
921 | # - obsolescence markers are added |
|
919 | # - obsolescence markers are added | |
922 | # - strip is used on a repo |
|
920 | # - strip is used on a repo | |
923 | def clearobscaches(repo): |
|
921 | def clearobscaches(repo): | |
924 | """Remove all obsolescence related cache from a repo |
|
922 | """Remove all obsolescence related cache from a repo | |
925 |
|
923 | |||
926 | This remove all cache in obsstore is the obsstore already exist on the |
|
924 | This remove all cache in obsstore is the obsstore already exist on the | |
927 | repo. |
|
925 | repo. | |
928 |
|
926 | |||
929 | (We could be smarter here given the exact event that triggers the cache |
|
927 | (We could be smarter here given the exact event that triggers the cache | |
930 | clearing)""" |
|
928 | clearing)""" | |
931 | # only clear caches if there is obsstore data in this repo |
|
929 | # only clear caches if there is obsstore data in this repo | |
932 | if b'obsstore' in repo._filecache: |
|
930 | if b'obsstore' in repo._filecache: | |
933 | repo.obsstore.caches.clear() |
|
931 | repo.obsstore.caches.clear() | |
934 |
|
932 | |||
935 |
|
933 | |||
936 | def _mutablerevs(repo): |
|
934 | def _mutablerevs(repo): | |
937 | """the set of mutable revision in the repository""" |
|
935 | """the set of mutable revision in the repository""" | |
938 | return repo._phasecache.getrevset(repo, phases.mutablephases) |
|
936 | return repo._phasecache.getrevset(repo, phases.mutablephases) | |
939 |
|
937 | |||
940 |
|
938 | |||
941 | @cachefor(b'obsolete') |
|
939 | @cachefor(b'obsolete') | |
942 | def _computeobsoleteset(repo): |
|
940 | def _computeobsoleteset(repo): | |
943 | """the set of obsolete revisions""" |
|
941 | """the set of obsolete revisions""" | |
944 | getnode = repo.changelog.node |
|
942 | getnode = repo.changelog.node | |
945 | notpublic = _mutablerevs(repo) |
|
943 | notpublic = _mutablerevs(repo) | |
946 | isobs = repo.obsstore.successors.__contains__ |
|
944 | isobs = repo.obsstore.successors.__contains__ | |
947 | return frozenset(r for r in notpublic if isobs(getnode(r))) |
|
945 | return frozenset(r for r in notpublic if isobs(getnode(r))) | |
948 |
|
946 | |||
949 |
|
947 | |||
950 | @cachefor(b'orphan') |
|
948 | @cachefor(b'orphan') | |
951 | def _computeorphanset(repo): |
|
949 | def _computeorphanset(repo): | |
952 | """the set of non obsolete revisions with obsolete parents""" |
|
950 | """the set of non obsolete revisions with obsolete parents""" | |
953 | pfunc = repo.changelog.parentrevs |
|
951 | pfunc = repo.changelog.parentrevs | |
954 | mutable = _mutablerevs(repo) |
|
952 | mutable = _mutablerevs(repo) | |
955 | obsolete = getrevs(repo, b'obsolete') |
|
953 | obsolete = getrevs(repo, b'obsolete') | |
956 | others = mutable - obsolete |
|
954 | others = mutable - obsolete | |
957 | unstable = set() |
|
955 | unstable = set() | |
958 | for r in sorted(others): |
|
956 | for r in sorted(others): | |
959 | # A rev is unstable if one of its parents is obsolete or unstable |
|
957 | # A rev is unstable if one of its parents is obsolete or unstable | |
960 | # this works since we traverse in increasing rev order |
|
958 | # this works since we traverse in increasing rev order | |
961 | for p in pfunc(r): |
|
959 | for p in pfunc(r): | |
962 | if p in obsolete or p in unstable: |
|
960 | if p in obsolete or p in unstable: | |
963 | unstable.add(r) |
|
961 | unstable.add(r) | |
964 | break |
|
962 | break | |
965 | return frozenset(unstable) |
|
963 | return frozenset(unstable) | |
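The single pass in increasing revision order is what keeps this linear: every parent has a smaller rev number than its child, so instability can be propagated in one sweep. A self-contained toy version (DAG invented for illustration):

    parents = {2: (1,), 3: (0,), 4: (2,)}  # toy DAG: rev -> parent revs
    obsolete = {1}

    unstable = set()
    for r in sorted(parents):
        # parent already obsolete or already unstable => this rev is unstable
        if any(p in obsolete or p in unstable for p in parents[r]):
            unstable.add(r)

    assert unstable == {2, 4}  # 4 is pulled in transitively through 2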
966 |
|
964 | |||
967 |
|
965 | |||
968 | @cachefor(b'suspended') |
|
966 | @cachefor(b'suspended') | |
969 | def _computesuspendedset(repo): |
|
967 | def _computesuspendedset(repo): | |
970 | """the set of obsolete parents with non obsolete descendants""" |
|
968 | """the set of obsolete parents with non obsolete descendants""" | |
971 | suspended = repo.changelog.ancestors(getrevs(repo, b'orphan')) |
|
969 | suspended = repo.changelog.ancestors(getrevs(repo, b'orphan')) | |
972 | return frozenset(r for r in getrevs(repo, b'obsolete') if r in suspended) |
|
970 | return frozenset(r for r in getrevs(repo, b'obsolete') if r in suspended) | |
973 |
|
971 | |||
974 |
|
972 | |||
975 | @cachefor(b'extinct') |
|
973 | @cachefor(b'extinct') | |
976 | def _computeextinctset(repo): |
|
974 | def _computeextinctset(repo): | |
977 | """the set of obsolete parents without non obsolete descendants""" |
|
975 | """the set of obsolete parents without non obsolete descendants""" | |
978 | return getrevs(repo, b'obsolete') - getrevs(repo, b'suspended') |
|
976 | return getrevs(repo, b'obsolete') - getrevs(repo, b'suspended') | |
979 |
|
977 | |||
980 |
|
978 | |||
981 | @cachefor(b'phasedivergent') |
|
979 | @cachefor(b'phasedivergent') | |
982 | def _computephasedivergentset(repo): |
|
980 | def _computephasedivergentset(repo): | |
983 | """the set of revs trying to obsolete public revisions""" |
|
981 | """the set of revs trying to obsolete public revisions""" | |
984 | bumped = set() |
|
982 | bumped = set() | |
985 | # util function (avoid attribute lookup in the loop) |
|
983 | # util function (avoid attribute lookup in the loop) | |
986 | phase = repo._phasecache.phase # would be faster to grab the full list |
|
984 | phase = repo._phasecache.phase # would be faster to grab the full list | |
987 | public = phases.public |
|
985 | public = phases.public | |
988 | cl = repo.changelog |
|
986 | cl = repo.changelog | |
989 | torev = cl.index.get_rev |
|
987 | torev = cl.index.get_rev | |
990 | tonode = cl.node |
|
988 | tonode = cl.node | |
991 | obsstore = repo.obsstore |
|
989 | obsstore = repo.obsstore | |
992 | for rev in repo.revs(b'(not public()) and (not obsolete())'): |
|
990 | for rev in repo.revs(b'(not public()) and (not obsolete())'): | |
993 | # We only evaluate mutable, non-obsolete revision |
|
991 | # We only evaluate mutable, non-obsolete revision | |
994 | node = tonode(rev) |
|
992 | node = tonode(rev) | |
995 | # (future) A cache of predecessors may be worth it if splits are very common |
|
993 | # (future) A cache of predecessors may be worth it if splits are very common | |
996 | for pnode in obsutil.allpredecessors( |
|
994 | for pnode in obsutil.allpredecessors( | |
997 | obsstore, [node], ignoreflags=bumpedfix |
|
995 | obsstore, [node], ignoreflags=bumpedfix | |
998 | ): |
|
996 | ): | |
999 | prev = torev(pnode) # unfiltered! but so is phasecache |
|
997 | prev = torev(pnode) # unfiltered! but so is phasecache | |
1000 | if (prev is not None) and (phase(repo, prev) <= public): |
|
998 | if (prev is not None) and (phase(repo, prev) <= public): | |
1001 | # we have a public predecessor |
|
999 | # we have a public predecessor | |
1002 | bumped.add(rev) |
|
1000 | bumped.add(rev) | |
1003 | break # Next draft! |
|
1001 | break # Next draft! | |
1004 | return frozenset(bumped) |
|
1002 | return frozenset(bumped) | |
1005 |
|
1003 | |||
1006 |
|
1004 | |||
1007 | @cachefor(b'contentdivergent') |
|
1005 | @cachefor(b'contentdivergent') | |
1008 | def _computecontentdivergentset(repo): |
|
1006 | def _computecontentdivergentset(repo): | |
1009 | """the set of rev that compete to be the final successors of some revision.""" |
|
1007 | """the set of rev that compete to be the final successors of some revision.""" | |
1010 | divergent = set() |
|
1008 | divergent = set() | |
1011 | obsstore = repo.obsstore |
|
1009 | obsstore = repo.obsstore | |
1012 | newermap = {} |
|
1010 | newermap = {} | |
1013 | tonode = repo.changelog.node |
|
1011 | tonode = repo.changelog.node | |
1014 | for rev in repo.revs(b'(not public()) - obsolete()'): |
|
1012 | for rev in repo.revs(b'(not public()) - obsolete()'): | |
1015 | node = tonode(rev) |
|
1013 | node = tonode(rev) | |
1016 | mark = obsstore.predecessors.get(node, ()) |
|
1014 | mark = obsstore.predecessors.get(node, ()) | |
1017 | toprocess = set(mark) |
|
1015 | toprocess = set(mark) | |
1018 | seen = set() |
|
1016 | seen = set() | |
1019 | while toprocess: |
|
1017 | while toprocess: | |
1020 | prec = toprocess.pop()[0] |
|
1018 | prec = toprocess.pop()[0] | |
1021 | if prec in seen: |
|
1019 | if prec in seen: | |
1022 | continue # emergency cycle hanging prevention |
|
1020 | continue # emergency cycle hanging prevention | |
1023 | seen.add(prec) |
|
1021 | seen.add(prec) | |
1024 | if prec not in newermap: |
|
1022 | if prec not in newermap: | |
1025 | obsutil.successorssets(repo, prec, cache=newermap) |
|
1023 | obsutil.successorssets(repo, prec, cache=newermap) | |
1026 | newer = [n for n in newermap[prec] if n] |
|
1024 | newer = [n for n in newermap[prec] if n] | |
1027 | if len(newer) > 1: |
|
1025 | if len(newer) > 1: | |
1028 | divergent.add(rev) |
|
1026 | divergent.add(rev) | |
1029 | break |
|
1027 | break | |
1030 | toprocess.update(obsstore.predecessors.get(prec, ())) |
|
1028 | toprocess.update(obsstore.predecessors.get(prec, ())) | |
1031 | return frozenset(divergent) |
|
1029 | return frozenset(divergent) | |
1032 |
|
1030 | |||
1033 |
|
1031 | |||
1034 | def makefoldid(relation, user): |
|
1032 | def makefoldid(relation, user): | |
1035 |
|
1033 | |||
1036 | folddigest = hashutil.sha1(user) |
|
1034 | folddigest = hashutil.sha1(user) | |
1037 | for p in relation[0] + relation[1]: |
|
1035 | for p in relation[0] + relation[1]: | |
1038 | folddigest.update(b'%d' % p.rev()) |
|
1036 | folddigest.update(b'%d' % p.rev()) | |
1039 | folddigest.update(p.node()) |
|
1037 | folddigest.update(p.node()) | |
1040 | # Since fold only has to compete against fold for the same successors, it |
|
1038 | # Since fold only has to compete against fold for the same successors, it | |
1041 | # seems fine to use a small ID. Smaller IDs save space. |
|
1039 | # seems fine to use a small ID. Smaller IDs save space. | |
1042 | return hex(folddigest.digest())[:8] |
|
1040 | return hex(folddigest.digest())[:8] | |
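Outside Mercurial this scheme is easy to reproduce with the standard library; the sketch below stands in for `makefoldid` using plain (rev, node) pairs instead of changectx objects, with invented inputs:

    import hashlib

    def foldid(user, revnodes):
        # hash the user plus each folded changeset's rev and node, then keep
        # only 8 hex digits: fold ids only compete among folds that share
        # successors, so a short id is enough
        d = hashlib.sha1(user)
        for rev, node in revnodes:
            d.update(b'%d' % rev)
            d.update(node)
        return d.hexdigest()[:8]

    print(foldid(b'alice', [(1, b'\x11' * 20), (2, b'\x22' * 20)]))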
1043 |
|
1041 | |||
1044 |
|
1042 | |||
1045 | def createmarkers( |
|
1043 | def createmarkers( | |
1046 | repo, relations, flag=0, date=None, metadata=None, operation=None |
|
1044 | repo, relations, flag=0, date=None, metadata=None, operation=None | |
1047 | ): |
|
1045 | ): | |
1048 | """Add obsolete markers between changesets in a repo |
|
1046 | """Add obsolete markers between changesets in a repo | |
1049 |
|
1047 | |||
1050 | <relations> must be an iterable of ((<old>,...), (<new>, ...)[,{metadata}]) |
|
1048 | <relations> must be an iterable of ((<old>,...), (<new>, ...)[,{metadata}]) | |
1051 | tuples. `old` and `news` are changectx objects. metadata is an optional dictionary |
|
1049 | tuples. `old` and `news` are changectx objects. metadata is an optional dictionary | |
1052 | containing metadata for this marker only. It is merged with the global |
|
1050 | containing metadata for this marker only. It is merged with the global | |
1053 | metadata specified through the `metadata` argument of this function. |
|
1051 | metadata specified through the `metadata` argument of this function. | |
1054 | Any string values in metadata must be UTF-8 bytes. |
|
1052 | Any string values in metadata must be UTF-8 bytes. | |
1055 |
|
1053 | |||
1056 | Trying to obsolete a public changeset will raise an exception. |
|
1054 | Trying to obsolete a public changeset will raise an exception. | |
1057 |
|
1055 | |||
1058 | Current user and date are used unless specified otherwise in the |
|
1056 | Current user and date are used unless specified otherwise in the | |
1059 | metadata attribute. |
|
1057 | metadata attribute. | |
1060 |
|
1058 | |||
1061 | This function operates within a transaction of its own, but does |
|
1059 | This function operates within a transaction of its own, but does | |
1062 | not take any lock on the repo. |
|
1060 | not take any lock on the repo. | |
1063 | """ |
|
1061 | """ | |
1064 | # prepare metadata |
|
1062 | # prepare metadata | |
1065 | if metadata is None: |
|
1063 | if metadata is None: | |
1066 | metadata = {} |
|
1064 | metadata = {} | |
1067 | if b'user' not in metadata: |
|
1065 | if b'user' not in metadata: | |
1068 | luser = ( |
|
1066 | luser = ( | |
1069 | repo.ui.config(b'devel', b'user.obsmarker') or repo.ui.username() |
|
1067 | repo.ui.config(b'devel', b'user.obsmarker') or repo.ui.username() | |
1070 | ) |
|
1068 | ) | |
1071 | metadata[b'user'] = encoding.fromlocal(luser) |
|
1069 | metadata[b'user'] = encoding.fromlocal(luser) | |
1072 |
|
1070 | |||
1073 | # Operation metadata handling |
|
1071 | # Operation metadata handling | |
1074 | useoperation = repo.ui.configbool( |
|
1072 | useoperation = repo.ui.configbool( | |
1075 | b'experimental', b'evolution.track-operation' |
|
1073 | b'experimental', b'evolution.track-operation' | |
1076 | ) |
|
1074 | ) | |
1077 | if useoperation and operation: |
|
1075 | if useoperation and operation: | |
1078 | metadata[b'operation'] = operation |
|
1076 | metadata[b'operation'] = operation | |
1079 |
|
1077 | |||
1080 | # Effect flag metadata handling |
|
1078 | # Effect flag metadata handling | |
1081 | saveeffectflag = repo.ui.configbool( |
|
1079 | saveeffectflag = repo.ui.configbool( | |
1082 | b'experimental', b'evolution.effect-flags' |
|
1080 | b'experimental', b'evolution.effect-flags' | |
1083 | ) |
|
1081 | ) | |
1084 |
|
1082 | |||
1085 | with repo.transaction(b'add-obsolescence-marker') as tr: |
|
1083 | with repo.transaction(b'add-obsolescence-marker') as tr: | |
1086 | markerargs = [] |
|
1084 | markerargs = [] | |
1087 | for rel in relations: |
|
1085 | for rel in relations: | |
1088 | predecessors = rel[0] |
|
1086 | predecessors = rel[0] | |
1089 | if not isinstance(predecessors, tuple): |
|
1087 | if not isinstance(predecessors, tuple): | |
1090 | # preserve compat with the old API until all callers are migrated |
|
1088 | # preserve compat with the old API until all callers are migrated | |
1091 | predecessors = (predecessors,) |
|
1089 | predecessors = (predecessors,) | |
1092 | if len(predecessors) > 1 and len(rel[1]) != 1: |
|
1090 | if len(predecessors) > 1 and len(rel[1]) != 1: | |
1093 | msg = b'Fold markers can only have 1 successor, not %d' |
|
1091 | msg = b'Fold markers can only have 1 successor, not %d' | |
1094 | raise error.ProgrammingError(msg % len(rel[1])) |
|
1092 | raise error.ProgrammingError(msg % len(rel[1])) | |
1095 | foldid = None |
|
1093 | foldid = None | |
1096 | foldsize = len(predecessors) |
|
1094 | foldsize = len(predecessors) | |
1097 | if 1 < foldsize: |
|
1095 | if 1 < foldsize: | |
1098 | foldid = makefoldid(rel, metadata[b'user']) |
|
1096 | foldid = makefoldid(rel, metadata[b'user']) | |
1099 | for foldidx, prec in enumerate(predecessors, 1): |
|
1097 | for foldidx, prec in enumerate(predecessors, 1): | |
1100 | sucs = rel[1] |
|
1098 | sucs = rel[1] | |
1101 | localmetadata = metadata.copy() |
|
1099 | localmetadata = metadata.copy() | |
1102 | if len(rel) > 2: |
|
1100 | if len(rel) > 2: | |
1103 | localmetadata.update(rel[2]) |
|
1101 | localmetadata.update(rel[2]) | |
1104 | if foldid is not None: |
|
1102 | if foldid is not None: | |
1105 | localmetadata[b'fold-id'] = foldid |
|
1103 | localmetadata[b'fold-id'] = foldid | |
1106 | localmetadata[b'fold-idx'] = b'%d' % foldidx |
|
1104 | localmetadata[b'fold-idx'] = b'%d' % foldidx | |
1107 | localmetadata[b'fold-size'] = b'%d' % foldsize |
|
1105 | localmetadata[b'fold-size'] = b'%d' % foldsize | |
1108 |
|
1106 | |||
1109 | if not prec.mutable(): |
|
1107 | if not prec.mutable(): | |
1110 | raise error.Abort( |
|
1108 | raise error.Abort( | |
1111 | _(b"cannot obsolete public changeset: %s") % prec, |
|
1109 | _(b"cannot obsolete public changeset: %s") % prec, | |
1112 | hint=b"see 'hg help phases' for details", |
|
1110 | hint=b"see 'hg help phases' for details", | |
1113 | ) |
|
1111 | ) | |
1114 | nprec = prec.node() |
|
1112 | nprec = prec.node() | |
1115 | nsucs = tuple(s.node() for s in sucs) |
|
1113 | nsucs = tuple(s.node() for s in sucs) | |
1116 | npare = None |
|
1114 | npare = None | |
1117 | if not nsucs: |
|
1115 | if not nsucs: | |
1118 | npare = tuple(p.node() for p in prec.parents()) |
|
1116 | npare = tuple(p.node() for p in prec.parents()) | |
1119 | if nprec in nsucs: |
|
1117 | if nprec in nsucs: | |
1120 | raise error.Abort( |
|
1118 | raise error.Abort( | |
1121 | _(b"changeset %s cannot obsolete itself") % prec |
|
1119 | _(b"changeset %s cannot obsolete itself") % prec | |
1122 | ) |
|
1120 | ) | |
1123 |
|
1121 | |||
1124 | # Effect flag can be different by relation |
|
1122 | # Effect flag can be different by relation | |
1125 | if saveeffectflag: |
|
1123 | if saveeffectflag: | |
1126 | # The effect flag is saved in a versioned field name for |
|
1124 | # The effect flag is saved in a versioned field name for | |
1127 | # future evolution |
|
1125 | # future evolution | |
1128 | effectflag = obsutil.geteffectflag(prec, sucs) |
|
1126 | effectflag = obsutil.geteffectflag(prec, sucs) | |
1129 | localmetadata[obsutil.EFFECTFLAGFIELD] = b"%d" % effectflag |
|
1127 | localmetadata[obsutil.EFFECTFLAGFIELD] = b"%d" % effectflag | |
1130 |
|
1128 | |||
1131 | # Creating the marker causes the hidden cache to become |
|
1129 | # Creating the marker causes the hidden cache to become | |
1132 | # invalid, which causes recomputation when we ask for |
|
1130 | # invalid, which causes recomputation when we ask for | |
1133 | # prec.parents() above, resulting in n^2 behavior. So let's |
|
1131 | # prec.parents() above, resulting in n^2 behavior. So let's | |
1134 | # prepare all of the args first, then create the markers. |
|
1132 | # prepare all of the args first, then create the markers. | |
1135 | markerargs.append((nprec, nsucs, npare, localmetadata)) |
|
1133 | markerargs.append((nprec, nsucs, npare, localmetadata)) | |
1136 |
|
1134 | |||
1137 | for args in markerargs: |
|
1135 | for args in markerargs: | |
1138 | nprec, nsucs, npare, localmetadata = args |
|
1136 | nprec, nsucs, npare, localmetadata = args | |
1139 | repo.obsstore.create( |
|
1137 | repo.obsstore.create( | |
1140 | tr, |
|
1138 | tr, | |
1141 | nprec, |
|
1139 | nprec, | |
1142 | nsucs, |
|
1140 | nsucs, | |
1143 | flag, |
|
1141 | flag, | |
1144 | parents=npare, |
|
1142 | parents=npare, | |
1145 | date=date, |
|
1143 | date=date, | |
1146 | metadata=localmetadata, |
|
1144 | metadata=localmetadata, | |
1147 | ui=repo.ui, |
|
1145 | ui=repo.ui, | |
1148 | ) |
|
1146 | ) | |
1149 | repo.filteredrevcache.clear() |
|
1147 | repo.filteredrevcache.clear() |
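To close the loop, a hypothetical caller of `createmarkers` following the docstring above; `repo`, `oldctx` and `newctx` are assumed to be an open repository and two changectx objects, and the caller takes the lock itself since the function only opens a transaction:

    with repo.lock():
        createmarkers(
            repo,
            [((oldctx,), (newctx,))],   # oldctx was rewritten into newctx
            operation=b'amend',         # recorded if track-operation is on
            metadata={b'note': b'illustrative marker'},
        )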