branchcache: change the _delayed flag to an explicit `_dirty` flag...
marmoute
r52381:94f82149 default
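
The change replaces the implicit `_delayed` flag (set when a write had to be
postponed) with an explicit `_dirty` flag (set whenever the in-memory cache
holds unwritten changes and cleared only after a successful write), and
renames `write_delayed` to `write_dirty` to match. A minimal sketch of the
dirty-flag pattern, using a made-up `SimpleCache` class rather than the real
branchcache:

class SimpleCache:
    def __init__(self):
        self._entries = {}
        self._dirty = False              # any unwritten in-memory changes?

    def update(self, key, value):
        self._entries[key] = value
        self._dirty = True               # explicitly mark "needs a write"

    def write(self, out, can_write_now=True):
        if not can_write_now:            # e.g. a transaction is still open
            return                       # stay dirty; a later flush will retry
        for item in sorted(self._entries.items()):
            out.append(item)
        self._dirty = False              # cleared only after a successful write

cache = SimpleCache()
cache.update('default', 'abc123')
pending = []
cache.write(pending, can_write_now=False)    # skipped, cache remains dirty
assert cache._dirty and pending == []
cache.write(pending)                         # flushed and marked clean
assert not cache._dirty and pending == [('default', 'abc123')]
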
@@ -1,1049 +1,1048 @@
1 # branchmap.py - logic to compute, maintain and store the branchmap for a local repo
1 # branchmap.py - logic to compute, maintain and store the branchmap for a local repo
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8
8
9 import struct
9 import struct
10
10
11 from .node import (
11 from .node import (
12 bin,
12 bin,
13 hex,
13 hex,
14 nullrev,
14 nullrev,
15 )
15 )
16
16
17 from typing import (
17 from typing import (
18 Any,
18 Any,
19 Callable,
19 Callable,
20 Dict,
20 Dict,
21 Iterable,
21 Iterable,
22 List,
22 List,
23 Optional,
23 Optional,
24 Set,
24 Set,
25 TYPE_CHECKING,
25 TYPE_CHECKING,
26 Tuple,
26 Tuple,
27 Union,
27 Union,
28 )
28 )
29
29
30 from . import (
30 from . import (
31 encoding,
31 encoding,
32 error,
32 error,
33 obsolete,
33 obsolete,
34 scmutil,
34 scmutil,
35 util,
35 util,
36 )
36 )
37
37
38 from .utils import (
38 from .utils import (
39 repoviewutil,
39 repoviewutil,
40 stringutil,
40 stringutil,
41 )
41 )
42
42
43 if TYPE_CHECKING:
43 if TYPE_CHECKING:
44 from . import localrepo
44 from . import localrepo
45
45
46 assert [localrepo]
46 assert [localrepo]
47
47
48 subsettable = repoviewutil.subsettable
48 subsettable = repoviewutil.subsettable
49
49
50 calcsize = struct.calcsize
50 calcsize = struct.calcsize
51 pack_into = struct.pack_into
51 pack_into = struct.pack_into
52 unpack_from = struct.unpack_from
52 unpack_from = struct.unpack_from
53
53
54
54
55 class BranchMapCache:
55 class BranchMapCache:
56 """mapping of filtered views of repo with their branchcache"""
56 """mapping of filtered views of repo with their branchcache"""
57
57
58 def __init__(self):
58 def __init__(self):
59 self._per_filter = {}
59 self._per_filter = {}
60
60
61 def __getitem__(self, repo):
61 def __getitem__(self, repo):
62 self.updatecache(repo)
62 self.updatecache(repo)
63 bcache = self._per_filter[repo.filtername]
63 bcache = self._per_filter[repo.filtername]
64 assert bcache._filtername == repo.filtername, (
64 assert bcache._filtername == repo.filtername, (
65 bcache._filtername,
65 bcache._filtername,
66 repo.filtername,
66 repo.filtername,
67 )
67 )
68 return bcache
68 return bcache
69
69
70 def update_disk(self, repo):
70 def update_disk(self, repo):
71 """ensure and up-to-date cache is (or will be) written on disk
71 """ensure and up-to-date cache is (or will be) written on disk
72
72
73 The cache for this repository view is updated if needed and written on
73 The cache for this repository view is updated if needed and written on
74 disk.
74 disk.
75
75
76 If a transaction is in progress, the writing is scheduled at transaction
76 If a transaction is in progress, the writing is scheduled at transaction
77 close. See the `BranchMapCache.write_delayed` method.
77 close. See the `BranchMapCache.write_dirty` method.
78
78
79 This method exists independently of __getitem__ as it is sometimes useful
79 This method exists independently of __getitem__ as it is sometimes useful
80 to signal that we have no intent to use the data in memory yet.
80 to signal that we have no intent to use the data in memory yet.
81 """
81 """
82 self.updatecache(repo)
82 self.updatecache(repo)
83 bcache = self._per_filter[repo.filtername]
83 bcache = self._per_filter[repo.filtername]
84 assert bcache._filtername == repo.filtername, (
84 assert bcache._filtername == repo.filtername, (
85 bcache._filtername,
85 bcache._filtername,
86 repo.filtername,
86 repo.filtername,
87 )
87 )
88 bcache.write(repo)
88 bcache.write(repo)
89
89
90 def updatecache(self, repo):
90 def updatecache(self, repo):
91 """Update the cache for the given filtered view on a repository"""
91 """Update the cache for the given filtered view on a repository"""
92 # This can trigger updates for the caches for subsets of the filtered
92 # This can trigger updates for the caches for subsets of the filtered
93 # view, e.g. when there is no cache for this filtered view or the cache
93 # view, e.g. when there is no cache for this filtered view or the cache
94 # is stale.
94 # is stale.
95
95
96 cl = repo.changelog
96 cl = repo.changelog
97 filtername = repo.filtername
97 filtername = repo.filtername
98 bcache = self._per_filter.get(filtername)
98 bcache = self._per_filter.get(filtername)
99 if bcache is None or not bcache.validfor(repo):
99 if bcache is None or not bcache.validfor(repo):
100 # cache object missing or cache object stale? Read from disk
100 # cache object missing or cache object stale? Read from disk
101 bcache = branchcache.fromfile(repo)
101 bcache = branchcache.fromfile(repo)
102
102
103 revs = []
103 revs = []
104 if bcache is None:
104 if bcache is None:
105 # no (fresh) cache available anymore, perhaps we can re-use
105 # no (fresh) cache available anymore, perhaps we can re-use
106 # the cache for a subset, then extend that to add info on missing
106 # the cache for a subset, then extend that to add info on missing
107 # revisions.
107 # revisions.
108 subsetname = subsettable.get(filtername)
108 subsetname = subsettable.get(filtername)
109 if subsetname is not None:
109 if subsetname is not None:
110 subset = repo.filtered(subsetname)
110 subset = repo.filtered(subsetname)
111 bcache = self[subset].copy(repo)
111 bcache = self[subset].copy(repo)
112 extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
112 extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
113 revs.extend(r for r in extrarevs if r <= bcache.tiprev)
113 revs.extend(r for r in extrarevs if r <= bcache.tiprev)
114 else:
114 else:
115 # nothing to fall back on, start empty.
115 # nothing to fall back on, start empty.
116 bcache = branchcache(repo)
116 bcache = branchcache(repo)
117
117
118 revs.extend(cl.revs(start=bcache.tiprev + 1))
118 revs.extend(cl.revs(start=bcache.tiprev + 1))
119 if revs:
119 if revs:
120 bcache.update(repo, revs)
120 bcache.update(repo, revs)
121
121
122 assert bcache.validfor(repo), filtername
122 assert bcache.validfor(repo), filtername
123 self._per_filter[repo.filtername] = bcache
123 self._per_filter[repo.filtername] = bcache
124
124
125 def replace(self, repo, remotebranchmap):
125 def replace(self, repo, remotebranchmap):
126 """Replace the branchmap cache for a repo with a branch mapping.
126 """Replace the branchmap cache for a repo with a branch mapping.
127
127
128 This is likely only called during clone with a branch map from a
128 This is likely only called during clone with a branch map from a
129 remote.
129 remote.
130
130
131 """
131 """
132 cl = repo.changelog
132 cl = repo.changelog
133 clrev = cl.rev
133 clrev = cl.rev
134 clbranchinfo = cl.branchinfo
134 clbranchinfo = cl.branchinfo
135 rbheads = []
135 rbheads = []
136 closed = set()
136 closed = set()
137 for bheads in remotebranchmap.values():
137 for bheads in remotebranchmap.values():
138 rbheads += bheads
138 rbheads += bheads
139 for h in bheads:
139 for h in bheads:
140 r = clrev(h)
140 r = clrev(h)
141 b, c = clbranchinfo(r)
141 b, c = clbranchinfo(r)
142 if c:
142 if c:
143 closed.add(h)
143 closed.add(h)
144
144
145 if rbheads:
145 if rbheads:
146 rtiprev = max((int(clrev(node)) for node in rbheads))
146 rtiprev = max((int(clrev(node)) for node in rbheads))
147 cache = branchcache(
147 cache = branchcache(
148 repo,
148 repo,
149 remotebranchmap,
149 remotebranchmap,
150 repo[rtiprev].node(),
150 repo[rtiprev].node(),
151 rtiprev,
151 rtiprev,
152 closednodes=closed,
152 closednodes=closed,
153 )
153 )
154
154
155 # Try to stick it as low as possible
155 # Try to stick it as low as possible
156 # filters above served are unlikely to be fetched from a clone
156 # filters above served are unlikely to be fetched from a clone
157 for candidate in (b'base', b'immutable', b'served'):
157 for candidate in (b'base', b'immutable', b'served'):
158 rview = repo.filtered(candidate)
158 rview = repo.filtered(candidate)
159 if cache.validfor(rview):
159 if cache.validfor(rview):
160 cache = self._per_filter[candidate] = cache.copy(rview)
160 cache = self._per_filter[candidate] = cache.copy(rview)
161 cache.write(rview)
161 cache.write(rview)
162 return
162 return
163
163
164 def clear(self):
164 def clear(self):
165 self._per_filter.clear()
165 self._per_filter.clear()
166
166
167 def write_delayed(self, repo):
167 def write_dirty(self, repo):
168 unfi = repo.unfiltered()
168 unfi = repo.unfiltered()
169 for filtername in repoviewutil.get_ordered_subset():
169 for filtername in repoviewutil.get_ordered_subset():
170 cache = self._per_filter.get(filtername)
170 cache = self._per_filter.get(filtername)
171 if cache is None:
171 if cache is None:
172 continue
172 continue
173 if cache._delayed:
173 if cache._dirty:
174 if filtername is None:
174 if filtername is None:
175 repo = unfi
175 repo = unfi
176 else:
176 else:
177 repo = unfi.filtered(filtername)
177 repo = unfi.filtered(filtername)
178 cache.write(repo)
178 cache.write(repo)
179
179
180
180
181 def _unknownnode(node):
181 def _unknownnode(node):
182 """raises ValueError when branchcache found a node which does not exists"""
182 """raises ValueError when branchcache found a node which does not exists"""
183 raise ValueError('node %s does not exist' % node.hex())
183 raise ValueError('node %s does not exist' % node.hex())
184
184
185
185
186 def _branchcachedesc(repo):
186 def _branchcachedesc(repo):
187 if repo.filtername is not None:
187 if repo.filtername is not None:
188 return b'branch cache (%s)' % repo.filtername
188 return b'branch cache (%s)' % repo.filtername
189 else:
189 else:
190 return b'branch cache'
190 return b'branch cache'
191
191
192
192
193 class _BaseBranchCache:
193 class _BaseBranchCache:
194 """A dict like object that hold branches heads cache.
194 """A dict like object that hold branches heads cache.
195
195
196 This cache is used to avoid costly computations to determine all the
196 This cache is used to avoid costly computations to determine all the
197 branch heads of a repo.
197 branch heads of a repo.
198
198
199 The cache is serialized on disk in the following format:
199 The cache is serialized on disk in the following format:
200
200
201 <tip hex node> <tip rev number> [optional filtered repo hex hash]
201 <tip hex node> <tip rev number> [optional filtered repo hex hash]
202 <branch head hex node> <open/closed state> <branch name>
202 <branch head hex node> <open/closed state> <branch name>
203 <branch head hex node> <open/closed state> <branch name>
203 <branch head hex node> <open/closed state> <branch name>
204 ...
204 ...
205
205
206 The first line is used to check if the cache is still valid. If the
206 The first line is used to check if the cache is still valid. If the
207 branch cache is for a filtered repo view, an optional third hash is
207 branch cache is for a filtered repo view, an optional third hash is
208 included that hashes the hashes of all filtered and obsolete revisions.
208 included that hashes the hashes of all filtered and obsolete revisions.
209
209
210 The open/closed state is represented by a single letter 'o' or 'c'.
210 The open/closed state is represented by a single letter 'o' or 'c'.
211 This field can be used to avoid changelog reads when determining if a
211 This field can be used to avoid changelog reads when determining if a
212 branch head closes a branch or not.
212 branch head closes a branch or not.
213 """
213 """
214
214
215 def __init__(
215 def __init__(
216 self,
216 self,
217 repo: "localrepo.localrepository",
217 repo: "localrepo.localrepository",
218 entries: Union[
218 entries: Union[
219 Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]
219 Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]
220 ] = (),
220 ] = (),
221 closed_nodes: Optional[Set[bytes]] = None,
221 closed_nodes: Optional[Set[bytes]] = None,
222 ) -> None:
222 ) -> None:
223 """hasnode is a function which can be used to verify whether changelog
223 """hasnode is a function which can be used to verify whether changelog
224 has a given node or not. If it's not provided, we assume that every node
224 has a given node or not. If it's not provided, we assume that every node
225 we have exists in changelog"""
225 we have exists in changelog"""
226 # closednodes is a set of nodes that close their branch. If the branch
226 # closednodes is a set of nodes that close their branch. If the branch
227 # cache has been updated, it may contain nodes that are no longer
227 # cache has been updated, it may contain nodes that are no longer
228 # heads.
228 # heads.
229 if closed_nodes is None:
229 if closed_nodes is None:
230 closed_nodes = set()
230 closed_nodes = set()
231 self._closednodes = set(closed_nodes)
231 self._closednodes = set(closed_nodes)
232 self._entries = dict(entries)
232 self._entries = dict(entries)
233
233
234 def __iter__(self):
234 def __iter__(self):
235 return iter(self._entries)
235 return iter(self._entries)
236
236
237 def __setitem__(self, key, value):
237 def __setitem__(self, key, value):
238 self._entries[key] = value
238 self._entries[key] = value
239
239
240 def __getitem__(self, key):
240 def __getitem__(self, key):
241 return self._entries[key]
241 return self._entries[key]
242
242
243 def __contains__(self, key):
243 def __contains__(self, key):
244 return key in self._entries
244 return key in self._entries
245
245
246 def iteritems(self):
246 def iteritems(self):
247 return self._entries.items()
247 return self._entries.items()
248
248
249 items = iteritems
249 items = iteritems
250
250
251 def hasbranch(self, label):
251 def hasbranch(self, label):
252 """checks whether a branch of this name exists or not"""
252 """checks whether a branch of this name exists or not"""
253 return label in self._entries
253 return label in self._entries
254
254
255 def _branchtip(self, heads):
255 def _branchtip(self, heads):
256 """Return tuple with last open head in heads and false,
256 """Return tuple with last open head in heads and false,
257 otherwise return last closed head and true."""
257 otherwise return last closed head and true."""
258 tip = heads[-1]
258 tip = heads[-1]
259 closed = True
259 closed = True
260 for h in reversed(heads):
260 for h in reversed(heads):
261 if h not in self._closednodes:
261 if h not in self._closednodes:
262 tip = h
262 tip = h
263 closed = False
263 closed = False
264 break
264 break
265 return tip, closed
265 return tip, closed
266
266
267 def branchtip(self, branch):
267 def branchtip(self, branch):
268 """Return the tipmost open head on branch head, otherwise return the
268 """Return the tipmost open head on branch head, otherwise return the
269 tipmost closed head on branch.
269 tipmost closed head on branch.
270 Raise KeyError for unknown branch."""
270 Raise KeyError for unknown branch."""
271 return self._branchtip(self[branch])[0]
271 return self._branchtip(self[branch])[0]
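
# Illustrative sketch (not part of branchmap.py): how a branch tip is picked
# from an ordered list of heads plus a set of closed heads, mirroring the
# _branchtip()/branchtip() logic above.  Heads and nodes here are toy values.
def branch_tip(heads, closed_nodes):
    """Return (tip, closed): the last open head, else the last head."""
    for h in reversed(heads):
        if h not in closed_nodes:
            return h, False
    return heads[-1], True

# heads are listed oldest to newest:
assert branch_tip([b'n1', b'n2', b'n3'], {b'n3'}) == (b'n2', False)
assert branch_tip([b'n1', b'n2'], {b'n1', b'n2'}) == (b'n2', True)
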
272
272
273 def iteropen(self, nodes):
273 def iteropen(self, nodes):
274 return (n for n in nodes if n not in self._closednodes)
274 return (n for n in nodes if n not in self._closednodes)
275
275
276 def branchheads(self, branch, closed=False):
276 def branchheads(self, branch, closed=False):
277 heads = self._entries[branch]
277 heads = self._entries[branch]
278 if not closed:
278 if not closed:
279 heads = list(self.iteropen(heads))
279 heads = list(self.iteropen(heads))
280 return heads
280 return heads
281
281
282 def iterbranches(self):
282 def iterbranches(self):
283 for bn, heads in self.items():
283 for bn, heads in self.items():
284 yield (bn, heads) + self._branchtip(heads)
284 yield (bn, heads) + self._branchtip(heads)
285
285
286 def iterheads(self):
286 def iterheads(self):
287 """returns all the heads"""
287 """returns all the heads"""
288 return self._entries.values()
288 return self._entries.values()
289
289
290 def update(self, repo, revgen):
290 def update(self, repo, revgen):
291 """Given a branchhead cache, self, that may have extra nodes or be
291 """Given a branchhead cache, self, that may have extra nodes or be
292 missing heads, and a generator of nodes that is strictly a superset of
292 missing heads, and a generator of nodes that is strictly a superset of
293 the missing heads, this function updates self to be correct.
293 the missing heads, this function updates self to be correct.
294 """
294 """
295 starttime = util.timer()
295 starttime = util.timer()
296 cl = repo.changelog
296 cl = repo.changelog
297 # collect new branch entries
297 # collect new branch entries
298 newbranches = {}
298 newbranches = {}
299 getbranchinfo = repo.revbranchcache().branchinfo
299 getbranchinfo = repo.revbranchcache().branchinfo
300 max_rev = -1
300 max_rev = -1
301 for r in revgen:
301 for r in revgen:
302 branch, closesbranch = getbranchinfo(r)
302 branch, closesbranch = getbranchinfo(r)
303 newbranches.setdefault(branch, []).append(r)
303 newbranches.setdefault(branch, []).append(r)
304 if closesbranch:
304 if closesbranch:
305 self._closednodes.add(cl.node(r))
305 self._closednodes.add(cl.node(r))
306 max_rev = max(max_rev, r)
306 max_rev = max(max_rev, r)
307 if max_rev < 0:
307 if max_rev < 0:
308 msg = "running branchcache.update without revision to update"
308 msg = "running branchcache.update without revision to update"
309 raise error.ProgrammingError(msg)
309 raise error.ProgrammingError(msg)
310
310
311 # Delay fetching the topological heads until they are needed.
311 # Delay fetching the topological heads until they are needed.
312 # A repository without non-contiguous branches can skip this part.
312 # A repository without non-contiguous branches can skip this part.
313 topoheads = None
313 topoheads = None
314
314
315 # If a changeset is visible, its parents must be visible too, so
315 # If a changeset is visible, its parents must be visible too, so
316 # use the faster unfiltered parent accessor.
316 # use the faster unfiltered parent accessor.
317 parentrevs = repo.unfiltered().changelog.parentrevs
317 parentrevs = repo.unfiltered().changelog.parentrevs
318
318
319 # Faster than using ctx.obsolete()
319 # Faster than using ctx.obsolete()
320 obsrevs = obsolete.getrevs(repo, b'obsolete')
320 obsrevs = obsolete.getrevs(repo, b'obsolete')
321
321
322 for branch, newheadrevs in newbranches.items():
322 for branch, newheadrevs in newbranches.items():
323 # For every branch, compute the new branchheads.
323 # For every branch, compute the new branchheads.
324 # A branchhead is a revision such that no descendant is on
324 # A branchhead is a revision such that no descendant is on
325 # the same branch.
325 # the same branch.
326 #
326 #
327 # The branchheads are computed iteratively in revision order.
327 # The branchheads are computed iteratively in revision order.
328 # This ensures topological order, i.e. parents are processed
328 # This ensures topological order, i.e. parents are processed
329 # before their children. Ancestors are inclusive here, i.e.
329 # before their children. Ancestors are inclusive here, i.e.
330 # any revision is an ancestor of itself.
330 # any revision is an ancestor of itself.
331 #
331 #
332 # Core observations:
332 # Core observations:
333 # - The current revision is always a branchhead for the
333 # - The current revision is always a branchhead for the
334 # repository up to that point.
334 # repository up to that point.
335 # - It is the first revision of the branch if and only if
335 # - It is the first revision of the branch if and only if
336 # there was no branchhead before. In that case, it is the
336 # there was no branchhead before. In that case, it is the
337 # only branchhead as there are no possible ancestors on
337 # only branchhead as there are no possible ancestors on
338 # the same branch.
338 # the same branch.
339 # - If a parent is on the same branch, a branchhead can
339 # - If a parent is on the same branch, a branchhead can
340 # only be an ancestor of that parent if it is the parent
340 # only be an ancestor of that parent if it is the parent
341 # itself. Otherwise it would have been removed as ancestor
341 # itself. Otherwise it would have been removed as ancestor
342 # of that parent before.
342 # of that parent before.
343 # - Therefore, if all parents are on the same branch, they
343 # - Therefore, if all parents are on the same branch, they
344 # can just be removed from the branchhead set.
344 # can just be removed from the branchhead set.
345 # - If one parent is on the same branch and the other is not
345 # - If one parent is on the same branch and the other is not
346 # and there was exactly one branchhead known, the existing
346 # and there was exactly one branchhead known, the existing
347 # branchhead can only be an ancestor if it is the parent.
347 # branchhead can only be an ancestor if it is the parent.
348 # Otherwise it would have been removed as ancestor of
348 # Otherwise it would have been removed as ancestor of
349 # the parent before. The other parent therefore can't have
349 # the parent before. The other parent therefore can't have
350 # a branchhead as ancestor.
350 # a branchhead as ancestor.
351 # - In all other cases, the parents on different branches
351 # - In all other cases, the parents on different branches
352 # could have a branchhead as ancestor. Those parents are
352 # could have a branchhead as ancestor. Those parents are
353 # kept in the "uncertain" set. If all branchheads are also
353 # kept in the "uncertain" set. If all branchheads are also
354 # topological heads, they can't have descendants and further
354 # topological heads, they can't have descendants and further
355 # checks can be skipped. Otherwise, the ancestors of the
355 # checks can be skipped. Otherwise, the ancestors of the
356 # "uncertain" set are removed from branchheads.
356 # "uncertain" set are removed from branchheads.
357 # This computation is heavy and avoided if at all possible.
357 # This computation is heavy and avoided if at all possible.
358 bheads = self._entries.get(branch, [])
358 bheads = self._entries.get(branch, [])
359 bheadset = {cl.rev(node) for node in bheads}
359 bheadset = {cl.rev(node) for node in bheads}
360 uncertain = set()
360 uncertain = set()
361 for newrev in sorted(newheadrevs):
361 for newrev in sorted(newheadrevs):
362 if newrev in obsrevs:
362 if newrev in obsrevs:
363 # We ignore obsolete changesets as they shouldn't be
363 # We ignore obsolete changesets as they shouldn't be
364 # considered heads.
364 # considered heads.
365 continue
365 continue
366
366
367 if not bheadset:
367 if not bheadset:
368 bheadset.add(newrev)
368 bheadset.add(newrev)
369 continue
369 continue
370
370
371 parents = [p for p in parentrevs(newrev) if p != nullrev]
371 parents = [p for p in parentrevs(newrev) if p != nullrev]
372 samebranch = set()
372 samebranch = set()
373 otherbranch = set()
373 otherbranch = set()
374 obsparents = set()
374 obsparents = set()
375 for p in parents:
375 for p in parents:
376 if p in obsrevs:
376 if p in obsrevs:
377 # We ignored this obsolete changeset earlier, but now
377 # We ignored this obsolete changeset earlier, but now
378 # that it has non-ignored children, we need to make
378 # that it has non-ignored children, we need to make
379 # sure their ancestors are not considered heads. To
379 # sure their ancestors are not considered heads. To
380 # achieve that, we will simply treat this obsolete
380 # achieve that, we will simply treat this obsolete
381 # changeset as a parent from other branch.
381 # changeset as a parent from other branch.
382 obsparents.add(p)
382 obsparents.add(p)
383 elif p in bheadset or getbranchinfo(p)[0] == branch:
383 elif p in bheadset or getbranchinfo(p)[0] == branch:
384 samebranch.add(p)
384 samebranch.add(p)
385 else:
385 else:
386 otherbranch.add(p)
386 otherbranch.add(p)
387 if not (len(bheadset) == len(samebranch) == 1):
387 if not (len(bheadset) == len(samebranch) == 1):
388 uncertain.update(otherbranch)
388 uncertain.update(otherbranch)
389 uncertain.update(obsparents)
389 uncertain.update(obsparents)
390 bheadset.difference_update(samebranch)
390 bheadset.difference_update(samebranch)
391 bheadset.add(newrev)
391 bheadset.add(newrev)
392
392
393 if uncertain:
393 if uncertain:
394 if topoheads is None:
394 if topoheads is None:
395 topoheads = set(cl.headrevs())
395 topoheads = set(cl.headrevs())
396 if bheadset - topoheads:
396 if bheadset - topoheads:
397 floorrev = min(bheadset)
397 floorrev = min(bheadset)
398 if floorrev <= max(uncertain):
398 if floorrev <= max(uncertain):
399 ancestors = set(cl.ancestors(uncertain, floorrev))
399 ancestors = set(cl.ancestors(uncertain, floorrev))
400 bheadset -= ancestors
400 bheadset -= ancestors
401 if bheadset:
401 if bheadset:
402 self[branch] = [cl.node(rev) for rev in sorted(bheadset)]
402 self[branch] = [cl.node(rev) for rev in sorted(bheadset)]
403
403
404 duration = util.timer() - starttime
404 duration = util.timer() - starttime
405 repo.ui.log(
405 repo.ui.log(
406 b'branchcache',
406 b'branchcache',
407 b'updated %s in %.4f seconds\n',
407 b'updated %s in %.4f seconds\n',
408 _branchcachedesc(repo),
408 _branchcachedesc(repo),
409 duration,
409 duration,
410 )
410 )
411 return max_rev
411 return max_rev
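
# Illustrative sketch (not part of branchmap.py): the incremental branch-head
# computation described by the comments in update() above, on a toy DAG.
# Revisions are processed in topological order; each new revision becomes a
# head of its branch, and any existing head that is one of its ancestors is
# dropped.  Unlike the real code, this sketch always walks ancestors instead
# of using the "uncertain"/topological-heads shortcut; `parents` and `branch`
# are made-up inputs, not Mercurial APIs.
def branch_heads(parents, branch, revs):
    """parents: rev -> tuple of parent revs; branch: rev -> branch name."""
    def ancestors(rev):
        seen, stack = set(), [rev]
        while stack:
            for p in parents[stack.pop()]:
                if p not in seen:
                    seen.add(p)
                    stack.append(p)
        return seen

    heads = {}
    for rev in sorted(revs):                  # numeric order is topological here
        bheads = heads.setdefault(branch[rev], set())
        bheads -= ancestors(rev)              # drop heads the new rev descends from
        bheads.add(rev)
    return heads

#   0 (default) -- 1 (stable) -- 2 (default)
parents = {0: (), 1: (0,), 2: (1,)}
branch = {0: b'default', 1: b'stable', 2: b'default'}
assert branch_heads(parents, branch, [0, 1, 2]) == {
    b'default': {2},                          # rev 0 is an ancestor of rev 2
    b'stable': {1},
}
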
412
412
413
413
414 class branchcache(_BaseBranchCache):
414 class branchcache(_BaseBranchCache):
415 """Branchmap info for a local repo or repoview"""
415 """Branchmap info for a local repo or repoview"""
416
416
417 _base_filename = b"branch2"
417 _base_filename = b"branch2"
418
418
419 def __init__(
419 def __init__(
420 self,
420 self,
421 repo: "localrepo.localrepository",
421 repo: "localrepo.localrepository",
422 entries: Union[
422 entries: Union[
423 Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]
423 Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]
424 ] = (),
424 ] = (),
425 tipnode: Optional[bytes] = None,
425 tipnode: Optional[bytes] = None,
426 tiprev: Optional[int] = nullrev,
426 tiprev: Optional[int] = nullrev,
427 filteredhash: Optional[bytes] = None,
427 filteredhash: Optional[bytes] = None,
428 closednodes: Optional[Set[bytes]] = None,
428 closednodes: Optional[Set[bytes]] = None,
429 hasnode: Optional[Callable[[bytes], bool]] = None,
429 hasnode: Optional[Callable[[bytes], bool]] = None,
430 verify_node: bool = False,
430 verify_node: bool = False,
431 ) -> None:
431 ) -> None:
432 """hasnode is a function which can be used to verify whether changelog
432 """hasnode is a function which can be used to verify whether changelog
433 has a given node or not. If it's not provided, we assume that every node
433 has a given node or not. If it's not provided, we assume that every node
434 we have exists in changelog"""
434 we have exists in changelog"""
435 self._filtername = repo.filtername
435 self._filtername = repo.filtername
436 self._delayed = False
437 if tipnode is None:
436 if tipnode is None:
438 self.tipnode = repo.nullid
437 self.tipnode = repo.nullid
439 else:
438 else:
440 self.tipnode = tipnode
439 self.tipnode = tipnode
441 self.tiprev = tiprev
440 self.tiprev = tiprev
442 self.filteredhash = filteredhash
441 self.filteredhash = filteredhash
442 self._dirty = False
443
443
444 super().__init__(repo=repo, entries=entries, closed_nodes=closednodes)
444 super().__init__(repo=repo, entries=entries, closed_nodes=closednodes)
445 # closednodes is a set of nodes that close their branch. If the branch
445 # closednodes is a set of nodes that close their branch. If the branch
446 # cache has been updated, it may contain nodes that are no longer
446 # cache has been updated, it may contain nodes that are no longer
447 # heads.
447 # heads.
448
448
449 # Do we need to verify branch at all ?
449 # Do we need to verify branch at all ?
450 self._verify_node = verify_node
450 self._verify_node = verify_node
451 # branches for which nodes are verified
451 # branches for which nodes are verified
452 self._verifiedbranches = set()
452 self._verifiedbranches = set()
453 self._hasnode = None
453 self._hasnode = None
454 if self._verify_node:
454 if self._verify_node:
455 self._hasnode = repo.changelog.hasnode
455 self._hasnode = repo.changelog.hasnode
456
456
457 def validfor(self, repo):
457 def validfor(self, repo):
458 """check that cache contents are valid for (a subset of) this repo
458 """check that cache contents are valid for (a subset of) this repo
459
459
460 - False when the order of changesets changed or if we detect a strip.
460 - False when the order of changesets changed or if we detect a strip.
461 - True when cache is up-to-date for the current repo or its subset."""
461 - True when cache is up-to-date for the current repo or its subset."""
462 try:
462 try:
463 node = repo.changelog.node(self.tiprev)
463 node = repo.changelog.node(self.tiprev)
464 except IndexError:
464 except IndexError:
465 # changesets were stripped and now we don't even have enough to
465 # changesets were stripped and now we don't even have enough to
466 # find tiprev
466 # find tiprev
467 return False
467 return False
468 if self.tipnode != node:
468 if self.tipnode != node:
469 # tiprev doesn't correspond to tipnode: repo was stripped, or this
469 # tiprev doesn't correspond to tipnode: repo was stripped, or this
470 # repo has a different order of changesets
470 # repo has a different order of changesets
471 return False
471 return False
472 tiphash = scmutil.filteredhash(repo, self.tiprev, needobsolete=True)
472 tiphash = scmutil.filteredhash(repo, self.tiprev, needobsolete=True)
473 # hashes don't match if this repo view has a different set of filtered
473 # hashes don't match if this repo view has a different set of filtered
474 # revisions (e.g. due to phase changes) or obsolete revisions (e.g.
474 # revisions (e.g. due to phase changes) or obsolete revisions (e.g.
475 # history was rewritten)
475 # history was rewritten)
476 return self.filteredhash == tiphash
476 return self.filteredhash == tiphash
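
# Illustrative sketch (not part of branchmap.py): the cache-key check that
# validfor() performs, using plain lists instead of a real changelog.  The
# key is (tipnode, tiprev, filteredhash); node and hash values are made up.
def cache_is_valid(cache_key, changelog_nodes, current_filtered_hash):
    tipnode, tiprev, filteredhash = cache_key
    if tiprev >= len(changelog_nodes):            # stripped below tiprev
        return False
    if changelog_nodes[tiprev] != tipnode:        # stripped or reordered history
        return False
    return filteredhash == current_filtered_hash  # same filtered/obsolete set?

nodes = [b'n0', b'n1', b'n2']
assert cache_is_valid((b'n2', 2, b'h1'), nodes, b'h1')
assert not cache_is_valid((b'n2', 2, b'h1'), nodes, b'h2')      # filtered set changed
assert not cache_is_valid((b'n2', 2, b'h1'), nodes[:2], b'h1')  # repo was stripped
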
477
477
478 @classmethod
478 @classmethod
479 def fromfile(cls, repo):
479 def fromfile(cls, repo):
480 f = None
480 f = None
481 try:
481 try:
482 f = repo.cachevfs(cls._filename(repo))
482 f = repo.cachevfs(cls._filename(repo))
483 lineiter = iter(f)
483 lineiter = iter(f)
484 init_kwargs = cls._load_header(repo, lineiter)
484 init_kwargs = cls._load_header(repo, lineiter)
485 bcache = cls(
485 bcache = cls(
486 repo,
486 repo,
487 verify_node=True,
487 verify_node=True,
488 **init_kwargs,
488 **init_kwargs,
489 )
489 )
490 if not bcache.validfor(repo):
490 if not bcache.validfor(repo):
491 # invalidate the cache
491 # invalidate the cache
492 raise ValueError('tip differs')
492 raise ValueError('tip differs')
493 bcache._load_heads(repo, lineiter)
493 bcache._load_heads(repo, lineiter)
494 except (IOError, OSError):
494 except (IOError, OSError):
495 return None
495 return None
496
496
497 except Exception as inst:
497 except Exception as inst:
498 if repo.ui.debugflag:
498 if repo.ui.debugflag:
499 msg = b'invalid %s: %s\n'
499 msg = b'invalid %s: %s\n'
500 msg %= (
500 msg %= (
501 _branchcachedesc(repo),
501 _branchcachedesc(repo),
502 stringutil.forcebytestr(inst),
502 stringutil.forcebytestr(inst),
503 )
503 )
504 repo.ui.debug(msg)
504 repo.ui.debug(msg)
505 bcache = None
505 bcache = None
506
506
507 finally:
507 finally:
508 if f:
508 if f:
509 f.close()
509 f.close()
510
510
511 return bcache
511 return bcache
512
512
513 @classmethod
513 @classmethod
514 def _load_header(cls, repo, lineiter) -> "dict[str, Any]":
514 def _load_header(cls, repo, lineiter) -> "dict[str, Any]":
515 """parse the head of a branchmap file
515 """parse the head of a branchmap file
516
516
517 return parameters to pass to a newly created class instance.
517 return parameters to pass to a newly created class instance.
518 """
518 """
519 cachekey = next(lineiter).rstrip(b'\n').split(b" ", 2)
519 cachekey = next(lineiter).rstrip(b'\n').split(b" ", 2)
520 last, lrev = cachekey[:2]
520 last, lrev = cachekey[:2]
521 last, lrev = bin(last), int(lrev)
521 last, lrev = bin(last), int(lrev)
522 filteredhash = None
522 filteredhash = None
523 if len(cachekey) > 2:
523 if len(cachekey) > 2:
524 filteredhash = bin(cachekey[2])
524 filteredhash = bin(cachekey[2])
525 return {
525 return {
526 "tipnode": last,
526 "tipnode": last,
527 "tiprev": lrev,
527 "tiprev": lrev,
528 "filteredhash": filteredhash,
528 "filteredhash": filteredhash,
529 }
529 }
530
530
531 def _load_heads(self, repo, lineiter):
531 def _load_heads(self, repo, lineiter):
532 """fully loads the branchcache by reading from the file using the line
532 """fully loads the branchcache by reading from the file using the line
533 iterator passed"""
533 iterator passed"""
534 for line in lineiter:
534 for line in lineiter:
535 line = line.rstrip(b'\n')
535 line = line.rstrip(b'\n')
536 if not line:
536 if not line:
537 continue
537 continue
538 node, state, label = line.split(b" ", 2)
538 node, state, label = line.split(b" ", 2)
539 if state not in b'oc':
539 if state not in b'oc':
540 raise ValueError('invalid branch state')
540 raise ValueError('invalid branch state')
541 label = encoding.tolocal(label.strip())
541 label = encoding.tolocal(label.strip())
542 node = bin(node)
542 node = bin(node)
543 self._entries.setdefault(label, []).append(node)
543 self._entries.setdefault(label, []).append(node)
544 if state == b'c':
544 if state == b'c':
545 self._closednodes.add(node)
545 self._closednodes.add(node)
546
546
547 @classmethod
547 @classmethod
548 def _filename(cls, repo):
548 def _filename(cls, repo):
549 """name of a branchcache file for a given repo or repoview"""
549 """name of a branchcache file for a given repo or repoview"""
550 filename = cls._base_filename
550 filename = cls._base_filename
551 if repo.filtername:
551 if repo.filtername:
552 filename = b'%s-%s' % (filename, repo.filtername)
552 filename = b'%s-%s' % (filename, repo.filtername)
553 return filename
553 return filename
554
554
555 def copy(self, repo):
555 def copy(self, repo):
556 """return a deep copy of the branchcache object"""
556 """return a deep copy of the branchcache object"""
557 other = type(self)(
557 other = type(self)(
558 repo=repo,
558 repo=repo,
559 # we always do a shallow copy of self._entries, and the values are
559 # we always do a shallow copy of self._entries, and the values are
560 # always replaced, so there is no need to deepcopy as long as the
560 # always replaced, so there is no need to deepcopy as long as the
561 # above remains true.
561 # above remains true.
562 entries=self._entries,
562 entries=self._entries,
563 tipnode=self.tipnode,
563 tipnode=self.tipnode,
564 tiprev=self.tiprev,
564 tiprev=self.tiprev,
565 filteredhash=self.filteredhash,
565 filteredhash=self.filteredhash,
566 closednodes=set(self._closednodes),
566 closednodes=set(self._closednodes),
567 verify_node=self._verify_node,
567 verify_node=self._verify_node,
568 )
568 )
569 # the copy will likely schedule a write anyway, but it does not seem
569 # the copy will likely schedule a write anyway, but it does not seem
570 # to hurt to overschedule
570 # to hurt to overschedule
571 other._delayed = self._delayed
571 other._dirty = self._dirty
572 # also copy information about the current verification state
572 # also copy information about the current verification state
573 other._verifiedbranches = set(self._verifiedbranches)
573 other._verifiedbranches = set(self._verifiedbranches)
574 return other
574 return other
575
575
576 def write(self, repo):
576 def write(self, repo):
577 assert self._filtername == repo.filtername, (
577 assert self._filtername == repo.filtername, (
578 self._filtername,
578 self._filtername,
579 repo.filtername,
579 repo.filtername,
580 )
580 )
581 tr = repo.currenttransaction()
581 tr = repo.currenttransaction()
582 if not getattr(tr, 'finalized', True):
582 if not getattr(tr, 'finalized', True):
583 # Avoid premature writing.
583 # Avoid premature writing.
584 #
584 #
585 # (The cache warming setup by localrepo will update the file later.)
585 # (The cache warming setup by localrepo will update the file later.)
586 self._delayed = True
587 return
586 return
588 try:
587 try:
589 filename = self._filename(repo)
588 filename = self._filename(repo)
590 with repo.cachevfs(filename, b"w", atomictemp=True) as f:
589 with repo.cachevfs(filename, b"w", atomictemp=True) as f:
591 self._write_header(f)
590 self._write_header(f)
592 nodecount = self._write_heads(f)
591 nodecount = self._write_heads(f)
593 repo.ui.log(
592 repo.ui.log(
594 b'branchcache',
593 b'branchcache',
595 b'wrote %s with %d labels and %d nodes\n',
594 b'wrote %s with %d labels and %d nodes\n',
596 _branchcachedesc(repo),
595 _branchcachedesc(repo),
597 len(self._entries),
596 len(self._entries),
598 nodecount,
597 nodecount,
599 )
598 )
600 self._delayed = False
599 self._dirty = False
601 except (IOError, OSError, error.Abort) as inst:
600 except (IOError, OSError, error.Abort) as inst:
602 # Abort may be raised by read only opener, so log and continue
601 # Abort may be raised by read only opener, so log and continue
603 repo.ui.debug(
602 repo.ui.debug(
604 b"couldn't write branch cache: %s\n"
603 b"couldn't write branch cache: %s\n"
605 % stringutil.forcebytestr(inst)
604 % stringutil.forcebytestr(inst)
606 )
605 )
607
606
608 def _write_header(self, fp) -> None:
607 def _write_header(self, fp) -> None:
609 """write the branch cache header to a file"""
608 """write the branch cache header to a file"""
610 cachekey = [hex(self.tipnode), b'%d' % self.tiprev]
609 cachekey = [hex(self.tipnode), b'%d' % self.tiprev]
611 if self.filteredhash is not None:
610 if self.filteredhash is not None:
612 cachekey.append(hex(self.filteredhash))
611 cachekey.append(hex(self.filteredhash))
613 fp.write(b" ".join(cachekey) + b'\n')
612 fp.write(b" ".join(cachekey) + b'\n')
614
613
615 def _write_heads(self, fp) -> int:
614 def _write_heads(self, fp) -> int:
616 """write list of heads to a file
615 """write list of heads to a file
617
616
618 Return the number of heads written."""
617 Return the number of heads written."""
619 nodecount = 0
618 nodecount = 0
620 for label, nodes in sorted(self._entries.items()):
619 for label, nodes in sorted(self._entries.items()):
621 label = encoding.fromlocal(label)
620 label = encoding.fromlocal(label)
622 for node in nodes:
621 for node in nodes:
623 nodecount += 1
622 nodecount += 1
624 if node in self._closednodes:
623 if node in self._closednodes:
625 state = b'c'
624 state = b'c'
626 else:
625 else:
627 state = b'o'
626 state = b'o'
628 fp.write(b"%s %s %s\n" % (hex(node), state, label))
627 fp.write(b"%s %s %s\n" % (hex(node), state, label))
629 return nodecount
628 return nodecount
630
629
631 def _verifybranch(self, branch):
630 def _verifybranch(self, branch):
632 """verify head nodes for the given branch."""
631 """verify head nodes for the given branch."""
633 if not self._verify_node:
632 if not self._verify_node:
634 return
633 return
635 if branch not in self._entries or branch in self._verifiedbranches:
634 if branch not in self._entries or branch in self._verifiedbranches:
636 return
635 return
637 assert self._hasnode is not None
636 assert self._hasnode is not None
638 for n in self._entries[branch]:
637 for n in self._entries[branch]:
639 if not self._hasnode(n):
638 if not self._hasnode(n):
640 _unknownnode(n)
639 _unknownnode(n)
641
640
642 self._verifiedbranches.add(branch)
641 self._verifiedbranches.add(branch)
643
642
644 def _verifyall(self):
643 def _verifyall(self):
645 """verifies nodes of all the branches"""
644 """verifies nodes of all the branches"""
646 for b in self._entries.keys():
645 for b in self._entries.keys():
647 if b not in self._verifiedbranches:
646 if b not in self._verifiedbranches:
648 self._verifybranch(b)
647 self._verifybranch(b)
649
648
650 def __getitem__(self, key):
649 def __getitem__(self, key):
651 self._verifybranch(key)
650 self._verifybranch(key)
652 return super().__getitem__(key)
651 return super().__getitem__(key)
653
652
654 def __contains__(self, key):
653 def __contains__(self, key):
655 self._verifybranch(key)
654 self._verifybranch(key)
656 return super().__contains__(key)
655 return super().__contains__(key)
657
656
658 def iteritems(self):
657 def iteritems(self):
659 self._verifyall()
658 self._verifyall()
660 return super().iteritems()
659 return super().iteritems()
661
660
662 items = iteritems
661 items = iteritems
663
662
664 def iterheads(self):
663 def iterheads(self):
665 """returns all the heads"""
664 """returns all the heads"""
666 self._verifyall()
665 self._verifyall()
667 return super().iterheads()
666 return super().iterheads()
668
667
669 def hasbranch(self, label):
668 def hasbranch(self, label):
670 """checks whether a branch of this name exists or not"""
669 """checks whether a branch of this name exists or not"""
671 self._verifybranch(label)
670 self._verifybranch(label)
672 return super().hasbranch(label)
671 return super().hasbranch(label)
673
672
674 def branchheads(self, branch, closed=False):
673 def branchheads(self, branch, closed=False):
675 self._verifybranch(branch)
674 self._verifybranch(branch)
676 return super().branchheads(branch, closed=closed)
675 return super().branchheads(branch, closed=closed)
677
676
678 def update(self, repo, revgen):
677 def update(self, repo, revgen):
679 assert self._filtername == repo.filtername, (
678 assert self._filtername == repo.filtername, (
680 self._filtername,
679 self._filtername,
681 repo.filtername,
680 repo.filtername,
682 )
681 )
683 cl = repo.changelog
682 cl = repo.changelog
684 max_rev = super().update(repo, revgen)
683 max_rev = super().update(repo, revgen)
685 # new tip revision which we found after iterating items from new
684 # new tip revision which we found after iterating items from new
686 # branches
685 # branches
687 if max_rev is not None and max_rev > self.tiprev:
686 if max_rev is not None and max_rev > self.tiprev:
688 self.tiprev = max_rev
687 self.tiprev = max_rev
689 self.tipnode = cl.node(max_rev)
688 self.tipnode = cl.node(max_rev)
690
689
691 if not self.validfor(repo):
690 if not self.validfor(repo):
692 # old cache key is now invalid for the repo, but we've just updated
691 # old cache key is now invalid for the repo, but we've just updated
693 # the cache and we assume it's valid, so let's make the cache key
692 # the cache and we assume it's valid, so let's make the cache key
694 # valid as well by recomputing it from the cached data
693 # valid as well by recomputing it from the cached data
695 self.tipnode = repo.nullid
694 self.tipnode = repo.nullid
696 self.tiprev = nullrev
695 self.tiprev = nullrev
697 for heads in self.iterheads():
696 for heads in self.iterheads():
698 if not heads:
697 if not heads:
699 # all revisions on a branch are obsolete
698 # all revisions on a branch are obsolete
700 continue
699 continue
701 # note: tiprev is not necessarily the tip revision of repo,
700 # note: tiprev is not necessarily the tip revision of repo,
702 # because the tip could be obsolete (i.e. not a head)
701 # because the tip could be obsolete (i.e. not a head)
703 tiprev = max(cl.rev(node) for node in heads)
702 tiprev = max(cl.rev(node) for node in heads)
704 if tiprev > self.tiprev:
703 if tiprev > self.tiprev:
705 self.tipnode = cl.node(tiprev)
704 self.tipnode = cl.node(tiprev)
706 self.tiprev = tiprev
705 self.tiprev = tiprev
707 self.filteredhash = scmutil.filteredhash(
706 self.filteredhash = scmutil.filteredhash(
708 repo, self.tiprev, needobsolete=True
707 repo, self.tiprev, needobsolete=True
709 )
708 )
710
709 self._dirty = True
711 self.write(repo)
710 self.write(repo)
712
711
713
712
714 class remotebranchcache(_BaseBranchCache):
713 class remotebranchcache(_BaseBranchCache):
715 """Branchmap info for a remote connection, should not write locally"""
714 """Branchmap info for a remote connection, should not write locally"""
716
715
717 def __init__(
716 def __init__(
718 self,
717 self,
719 repo: "localrepo.localrepository",
718 repo: "localrepo.localrepository",
720 entries: Union[
719 entries: Union[
721 Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]
720 Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]
722 ] = (),
721 ] = (),
723 closednodes: Optional[Set[bytes]] = None,
722 closednodes: Optional[Set[bytes]] = None,
724 ) -> None:
723 ) -> None:
725 super().__init__(repo=repo, entries=entries, closed_nodes=closednodes)
724 super().__init__(repo=repo, entries=entries, closed_nodes=closednodes)
726
725
727
726
728 # Revision branch info cache
727 # Revision branch info cache
729
728
730 _rbcversion = b'-v1'
729 _rbcversion = b'-v1'
731 _rbcnames = b'rbc-names' + _rbcversion
730 _rbcnames = b'rbc-names' + _rbcversion
732 _rbcrevs = b'rbc-revs' + _rbcversion
731 _rbcrevs = b'rbc-revs' + _rbcversion
733 # [4 byte hash prefix][4 byte branch name number with sign bit indicating open]
732 # [4 byte hash prefix][4 byte branch name number with sign bit indicating open]
734 _rbcrecfmt = b'>4sI'
733 _rbcrecfmt = b'>4sI'
735 _rbcrecsize = calcsize(_rbcrecfmt)
734 _rbcrecsize = calcsize(_rbcrecfmt)
736 _rbcmininc = 64 * _rbcrecsize
735 _rbcmininc = 64 * _rbcrecsize
737 _rbcnodelen = 4
736 _rbcnodelen = 4
738 _rbcbranchidxmask = 0x7FFFFFFF
737 _rbcbranchidxmask = 0x7FFFFFFF
739 _rbccloseflag = 0x80000000
738 _rbccloseflag = 0x80000000
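
# Illustrative sketch (not part of branchmap.py): packing and unpacking one
# rbc-revs record with the >4sI layout and flag/mask values defined above
# (local copies are used so the snippet stands alone; the node bytes are
# made up).
import struct

RBCRECFMT = b'>4sI'          # mirrors _rbcrecfmt
RBCCLOSEFLAG = 0x80000000    # mirrors _rbccloseflag
RBCIDXMASK = 0x7FFFFFFF      # mirrors _rbcbranchidxmask

record = struct.pack(RBCRECFMT, b'\xaa\xbb\xcc\xdd', 5 | RBCCLOSEFLAG)
assert len(record) == struct.calcsize(RBCRECFMT)    # 8 bytes per revision

prefix, field = struct.unpack(RBCRECFMT, record)
assert prefix == b'\xaa\xbb\xcc\xdd'                # 4-byte node hash prefix
assert field & RBCCLOSEFLAG                         # branch-close bit is set
assert field & RBCIDXMASK == 5                      # index into rbc-names
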
740
739
741
740
742 class rbcrevs:
741 class rbcrevs:
743 """a byte string consisting of an immutable prefix followed by a mutable suffix"""
742 """a byte string consisting of an immutable prefix followed by a mutable suffix"""
744
743
745 def __init__(self, revs):
744 def __init__(self, revs):
746 self._prefix = revs
745 self._prefix = revs
747 self._rest = bytearray()
746 self._rest = bytearray()
748
747
749 def __len__(self):
748 def __len__(self):
750 return len(self._prefix) + len(self._rest)
749 return len(self._prefix) + len(self._rest)
751
750
752 def unpack_record(self, rbcrevidx):
751 def unpack_record(self, rbcrevidx):
753 if rbcrevidx < len(self._prefix):
752 if rbcrevidx < len(self._prefix):
754 return unpack_from(_rbcrecfmt, util.buffer(self._prefix), rbcrevidx)
753 return unpack_from(_rbcrecfmt, util.buffer(self._prefix), rbcrevidx)
755 else:
754 else:
756 return unpack_from(
755 return unpack_from(
757 _rbcrecfmt,
756 _rbcrecfmt,
758 util.buffer(self._rest),
757 util.buffer(self._rest),
759 rbcrevidx - len(self._prefix),
758 rbcrevidx - len(self._prefix),
760 )
759 )
761
760
762 def make_mutable(self):
761 def make_mutable(self):
763 if len(self._prefix) > 0:
762 if len(self._prefix) > 0:
764 entirety = bytearray()
763 entirety = bytearray()
765 entirety[:] = self._prefix
764 entirety[:] = self._prefix
766 entirety.extend(self._rest)
765 entirety.extend(self._rest)
767 self._rest = entirety
766 self._rest = entirety
768 self._prefix = bytearray()
767 self._prefix = bytearray()
769
768
770 def truncate(self, pos):
769 def truncate(self, pos):
771 self.make_mutable()
770 self.make_mutable()
772 del self._rest[pos:]
771 del self._rest[pos:]
773
772
774 def pack_into(self, rbcrevidx, node, branchidx):
773 def pack_into(self, rbcrevidx, node, branchidx):
775 if rbcrevidx < len(self._prefix):
774 if rbcrevidx < len(self._prefix):
776 self.make_mutable()
775 self.make_mutable()
777 buf = self._rest
776 buf = self._rest
778 start_offset = rbcrevidx - len(self._prefix)
777 start_offset = rbcrevidx - len(self._prefix)
779 end_offset = start_offset + _rbcrecsize
778 end_offset = start_offset + _rbcrecsize
780
779
781 if len(self._rest) < end_offset:
780 if len(self._rest) < end_offset:
782 # bytearray doesn't allocate extra space at least in Python 3.7.
781 # bytearray doesn't allocate extra space at least in Python 3.7.
783 # When multiple changesets are added in a row, precise resize would
782 # When multiple changesets are added in a row, precise resize would
784 # result in quadratic complexity. Overallocate to compensate by
783 # result in quadratic complexity. Overallocate to compensate by
785 # using the classic doubling technique for dynamic arrays instead.
784 # using the classic doubling technique for dynamic arrays instead.
786 # If there was a gap in the map before, less space will be reserved.
785 # If there was a gap in the map before, less space will be reserved.
787 self._rest.extend(b'\0' * end_offset)
786 self._rest.extend(b'\0' * end_offset)
788 return pack_into(
787 return pack_into(
789 _rbcrecfmt,
788 _rbcrecfmt,
790 buf,
789 buf,
791 start_offset,
790 start_offset,
792 node,
791 node,
793 branchidx,
792 branchidx,
794 )
793 )
795
794
796 def extend(self, extension):
795 def extend(self, extension):
797 return self._rest.extend(extension)
796 return self._rest.extend(extension)
798
797
799 def slice(self, begin, end):
798 def slice(self, begin, end):
800 if begin < len(self._prefix):
799 if begin < len(self._prefix):
801 acc = bytearray()
800 acc = bytearray()
802 acc[:] = self._prefix[begin:end]
801 acc[:] = self._prefix[begin:end]
803 acc.extend(
802 acc.extend(
804 self._rest[begin - len(self._prefix) : end - len(self._prefix)]
803 self._rest[begin - len(self._prefix) : end - len(self._prefix)]
805 )
804 )
806 return acc
805 return acc
807 return self._rest[begin - len(self._prefix) : end - len(self._prefix)]
806 return self._rest[begin - len(self._prefix) : end - len(self._prefix)]
808
807
809
808
810 class revbranchcache:
809 class revbranchcache:
811 """Persistent cache, mapping from revision number to branch name and close.
810 """Persistent cache, mapping from revision number to branch name and close.
812 This is a low level cache, independent of filtering.
811 This is a low level cache, independent of filtering.
813
812
814 Branch names are stored in rbc-names in internal encoding separated by 0.
813 Branch names are stored in rbc-names in internal encoding separated by 0.
815 rbc-names is append-only, and each branch name is only stored once and will
814 rbc-names is append-only, and each branch name is only stored once and will
816 thus have a unique index.
815 thus have a unique index.
817
816
818 The branch info for each revision is stored in rbc-revs as constant size
817 The branch info for each revision is stored in rbc-revs as constant size
819 records. The whole file is read into memory, but it is only 'parsed' on
818 records. The whole file is read into memory, but it is only 'parsed' on
820 demand. The file is usually append-only but will be truncated if repo
819 demand. The file is usually append-only but will be truncated if repo
821 modification is detected.
820 modification is detected.
822 The record for each revision contains the first 4 bytes of the
821 The record for each revision contains the first 4 bytes of the
823 corresponding node hash, and the record is only used if it still matches.
822 corresponding node hash, and the record is only used if it still matches.
824 Even a completely trashed rbc-revs file will thus still give the right result
823 Even a completely trashed rbc-revs file will thus still give the right result
825 while converging towards full recovery ... assuming no incorrectly matching
824 while converging towards full recovery ... assuming no incorrectly matching
826 node hashes.
825 node hashes.
827 The record also contains 4 bytes where 31 bits contain the index of the
826 The record also contains 4 bytes where 31 bits contain the index of the
828 branch and the last bit indicates that it is a branch close commit.
827 branch and the last bit indicates that it is a branch close commit.
829 The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
828 The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
830 and will grow with it but be 1/8th of its size.
829 and will grow with it but be 1/8th of its size.
831 """
830 """
832
831
833 def __init__(self, repo, readonly=True):
832 def __init__(self, repo, readonly=True):
834 assert repo.filtername is None
833 assert repo.filtername is None
835 self._repo = repo
834 self._repo = repo
836 self._names = [] # branch names in local encoding with static index
835 self._names = [] # branch names in local encoding with static index
837 self._rbcrevs = rbcrevs(bytearray())
836 self._rbcrevs = rbcrevs(bytearray())
838 self._rbcsnameslen = 0 # length of names read at _rbcsnameslen
837 self._rbcsnameslen = 0 # length of names read at _rbcsnameslen
839 try:
838 try:
840 bndata = repo.cachevfs.read(_rbcnames)
839 bndata = repo.cachevfs.read(_rbcnames)
841 self._rbcsnameslen = len(bndata) # for verification before writing
840 self._rbcsnameslen = len(bndata) # for verification before writing
842 if bndata:
841 if bndata:
843 self._names = [
842 self._names = [
844 encoding.tolocal(bn) for bn in bndata.split(b'\0')
843 encoding.tolocal(bn) for bn in bndata.split(b'\0')
845 ]
844 ]
846 except (IOError, OSError):
845 except (IOError, OSError):
847 if readonly:
846 if readonly:
848 # don't try to use cache - fall back to the slow path
847 # don't try to use cache - fall back to the slow path
849 self.branchinfo = self._branchinfo
848 self.branchinfo = self._branchinfo
850
849
851 if self._names:
850 if self._names:
852 try:
851 try:
853 if repo.ui.configbool(b'format', b'mmap-revbranchcache'):
852 if repo.ui.configbool(b'format', b'mmap-revbranchcache'):
854 with repo.cachevfs(_rbcrevs) as fp:
853 with repo.cachevfs(_rbcrevs) as fp:
855 data = util.buffer(util.mmapread(fp))
854 data = util.buffer(util.mmapread(fp))
856 else:
855 else:
857 data = repo.cachevfs.read(_rbcrevs)
856 data = repo.cachevfs.read(_rbcrevs)
858 self._rbcrevs = rbcrevs(data)
857 self._rbcrevs = rbcrevs(data)
859 except (IOError, OSError) as inst:
858 except (IOError, OSError) as inst:
860 repo.ui.debug(
859 repo.ui.debug(
861 b"couldn't read revision branch cache: %s\n"
860 b"couldn't read revision branch cache: %s\n"
862 % stringutil.forcebytestr(inst)
861 % stringutil.forcebytestr(inst)
863 )
862 )
864 # remember number of good records on disk
863 # remember number of good records on disk
865 self._rbcrevslen = min(
864 self._rbcrevslen = min(
866 len(self._rbcrevs) // _rbcrecsize, len(repo.changelog)
865 len(self._rbcrevs) // _rbcrecsize, len(repo.changelog)
867 )
866 )
868 if self._rbcrevslen == 0:
867 if self._rbcrevslen == 0:
869 self._names = []
868 self._names = []
870 self._rbcnamescount = len(self._names) # number of names read at
869 self._rbcnamescount = len(self._names) # number of names read at
871 # _rbcsnameslen
870 # _rbcsnameslen
872
871
873 def _clear(self):
872 def _clear(self):
874 self._rbcsnameslen = 0
873 self._rbcsnameslen = 0
875 del self._names[:]
874 del self._names[:]
876 self._rbcnamescount = 0
875 self._rbcnamescount = 0
877 self._rbcrevslen = len(self._repo.changelog)
876 self._rbcrevslen = len(self._repo.changelog)
878 self._rbcrevs = rbcrevs(bytearray(self._rbcrevslen * _rbcrecsize))
877 self._rbcrevs = rbcrevs(bytearray(self._rbcrevslen * _rbcrecsize))
879 util.clearcachedproperty(self, b'_namesreverse')
878 util.clearcachedproperty(self, b'_namesreverse')
880
879
881 @util.propertycache
880 @util.propertycache
882 def _namesreverse(self):
881 def _namesreverse(self):
883 return {b: r for r, b in enumerate(self._names)}
882 return {b: r for r, b in enumerate(self._names)}
884
883
885 def branchinfo(self, rev):
884 def branchinfo(self, rev):
886 """Return branch name and close flag for rev, using and updating
885 """Return branch name and close flag for rev, using and updating
887 persistent cache."""
886 persistent cache."""
888 changelog = self._repo.changelog
887 changelog = self._repo.changelog
889 rbcrevidx = rev * _rbcrecsize
888 rbcrevidx = rev * _rbcrecsize
890
889
891 # avoid negative index, changelog.read(nullrev) is fast without cache
890 # avoid negative index, changelog.read(nullrev) is fast without cache
892 if rev == nullrev:
891 if rev == nullrev:
893 return changelog.branchinfo(rev)
892 return changelog.branchinfo(rev)
894
893
895 # if requested rev isn't allocated, grow and cache the rev info
894 # if requested rev isn't allocated, grow and cache the rev info
896 if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
895 if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
897 return self._branchinfo(rev)
896 return self._branchinfo(rev)
898
897
899 # fast path: extract data from cache, use it if node is matching
898 # fast path: extract data from cache, use it if node is matching
900 reponode = changelog.node(rev)[:_rbcnodelen]
899 reponode = changelog.node(rev)[:_rbcnodelen]
901 cachenode, branchidx = self._rbcrevs.unpack_record(rbcrevidx)
900 cachenode, branchidx = self._rbcrevs.unpack_record(rbcrevidx)
902 close = bool(branchidx & _rbccloseflag)
901 close = bool(branchidx & _rbccloseflag)
903 if close:
902 if close:
904 branchidx &= _rbcbranchidxmask
903 branchidx &= _rbcbranchidxmask
905 if cachenode == b'\0\0\0\0':
904 if cachenode == b'\0\0\0\0':
906 pass
905 pass
907 elif cachenode == reponode:
906 elif cachenode == reponode:
908 try:
907 try:
909 return self._names[branchidx], close
908 return self._names[branchidx], close
910 except IndexError:
909 except IndexError:
911 # recover from invalid reference to unknown branch
910 # recover from invalid reference to unknown branch
912 self._repo.ui.debug(
911 self._repo.ui.debug(
913 b"referenced branch names not found"
912 b"referenced branch names not found"
914 b" - rebuilding revision branch cache from scratch\n"
913 b" - rebuilding revision branch cache from scratch\n"
915 )
914 )
916 self._clear()
915 self._clear()
917 else:
916 else:
918 # rev/node map has changed, invalidate the cache from here up
917 # rev/node map has changed, invalidate the cache from here up
919 self._repo.ui.debug(
918 self._repo.ui.debug(
920 b"history modification detected - truncating "
919 b"history modification detected - truncating "
921 b"revision branch cache to revision %d\n" % rev
920 b"revision branch cache to revision %d\n" % rev
922 )
921 )
923 truncate = rbcrevidx + _rbcrecsize
922 truncate = rbcrevidx + _rbcrecsize
924 self._rbcrevs.truncate(truncate)
923 self._rbcrevs.truncate(truncate)
925 self._rbcrevslen = min(self._rbcrevslen, truncate)
924 self._rbcrevslen = min(self._rbcrevslen, truncate)
926
925
927 # fall back to slow path and make sure it will be written to disk
926 # fall back to slow path and make sure it will be written to disk
928 return self._branchinfo(rev)
927 return self._branchinfo(rev)
929
928
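The fast path above works because each cache entry is a fixed-size record holding a truncated node and a branch index whose top bit doubles as the close-branch flag. The sketch below is an illustration only, assuming a 4-byte node prefix and a 4-byte big-endian index in the spirit of the module's _rbcrecsize, _rbcnodelen, _rbccloseflag and _rbcbranchidxmask constants:

import struct

RECORD = struct.Struct('>4sI')   # assumed layout: 4-byte node prefix + uint32 index
CLOSE_FLAG = 1 << 31             # assumed close-branch marker bit
IDX_MASK = CLOSE_FLAG - 1

def decode_record(data, rev):
    """Return (node_prefix, branch_index, is_closed) for ``rev``."""
    node_prefix, raw = RECORD.unpack_from(data, rev * RECORD.size)
    return node_prefix, raw & IDX_MASK, bool(raw & CLOSE_FLAG)
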
930 def _branchinfo(self, rev):
929 def _branchinfo(self, rev):
931 """Retrieve branch info from changelog and update _rbcrevs"""
930 """Retrieve branch info from changelog and update _rbcrevs"""
932 changelog = self._repo.changelog
931 changelog = self._repo.changelog
933 b, close = changelog.branchinfo(rev)
932 b, close = changelog.branchinfo(rev)
934 if b in self._namesreverse:
933 if b in self._namesreverse:
935 branchidx = self._namesreverse[b]
934 branchidx = self._namesreverse[b]
936 else:
935 else:
937 branchidx = len(self._names)
936 branchidx = len(self._names)
938 self._names.append(b)
937 self._names.append(b)
939 self._namesreverse[b] = branchidx
938 self._namesreverse[b] = branchidx
940 reponode = changelog.node(rev)
939 reponode = changelog.node(rev)
941 if close:
940 if close:
942 branchidx |= _rbccloseflag
941 branchidx |= _rbccloseflag
943 self._setcachedata(rev, reponode, branchidx)
942 self._setcachedata(rev, reponode, branchidx)
944 return b, close
943 return b, close
945
944
946 def setdata(self, rev, changelogrevision):
945 def setdata(self, rev, changelogrevision):
947 """add new data information to the cache"""
946 """add new data information to the cache"""
948 branch, close = changelogrevision.branchinfo
947 branch, close = changelogrevision.branchinfo
949
948
950 if branch in self._namesreverse:
949 if branch in self._namesreverse:
951 branchidx = self._namesreverse[branch]
950 branchidx = self._namesreverse[branch]
952 else:
951 else:
953 branchidx = len(self._names)
952 branchidx = len(self._names)
954 self._names.append(branch)
953 self._names.append(branch)
955 self._namesreverse[branch] = branchidx
954 self._namesreverse[branch] = branchidx
956 if close:
955 if close:
957 branchidx |= _rbccloseflag
956 branchidx |= _rbccloseflag
958 self._setcachedata(rev, self._repo.changelog.node(rev), branchidx)
957 self._setcachedata(rev, self._repo.changelog.node(rev), branchidx)
959 # If no cache data was readable (missing file, bad permissions, etc.)
958 # If no cache data was readable (missing file, bad permissions, etc.)
960 # the cache was bypassing itself by setting:
959 # the cache was bypassing itself by setting:
961 #
960 #
962 # self.branchinfo = self._branchinfo
961 # self.branchinfo = self._branchinfo
963 #
962 #
964 # Since we now have data in the cache, we need to drop this bypassing.
963 # Since we now have data in the cache, we need to drop this bypassing.
965 if 'branchinfo' in vars(self):
964 if 'branchinfo' in vars(self):
966 del self.branchinfo
965 del self.branchinfo
967
966
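The `del self.branchinfo` above undoes the earlier `self.branchinfo = self._branchinfo` bypass through ordinary Python attribute shadowing: an instance attribute hides the class method until it is deleted. A minimal stand-alone sketch of that pattern, with a toy Cache class that is not Mercurial code:

class Cache:
    def lookup(self, key):
        return 'from cache: %s' % key

    def _slow_lookup(self, key):
        return 'recomputed: %s' % key

c = Cache()
c.lookup = c._slow_lookup        # bypass: the instance attribute shadows the method
assert c.lookup('x') == 'recomputed: x'
if 'lookup' in vars(c):          # later, once cached data exists again
    del c.lookup                 # drop the bypass, the class method takes over
assert c.lookup('x') == 'from cache: x'
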
968 def _setcachedata(self, rev, node, branchidx):
967 def _setcachedata(self, rev, node, branchidx):
969 """Writes the node's branch data to the in-memory cache."""
968 """Writes the node's branch data to the in-memory cache."""
970 if rev == nullrev:
969 if rev == nullrev:
971 return
970 return
972 rbcrevidx = rev * _rbcrecsize
971 rbcrevidx = rev * _rbcrecsize
973 self._rbcrevs.pack_into(rbcrevidx, node, branchidx)
972 self._rbcrevs.pack_into(rbcrevidx, node, branchidx)
974 self._rbcrevslen = min(self._rbcrevslen, rev)
973 self._rbcrevslen = min(self._rbcrevslen, rev)
975
974
976 tr = self._repo.currenttransaction()
975 tr = self._repo.currenttransaction()
977 if tr:
976 if tr:
978 tr.addfinalize(b'write-revbranchcache', self.write)
977 tr.addfinalize(b'write-revbranchcache', self.write)
979
978
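Registering `self.write` with `tr.addfinalize` defers the disk write to transaction close, so many `_setcachedata` calls inside one transaction cost a single write. The toy transaction below illustrates that hook; it is invented for the example and far simpler than Mercurial's real transaction object:

class ToyTransaction:
    def __init__(self):
        self._finalizers = {}

    def addfinalize(self, category, callback):
        # re-registering the same category simply replaces the callback
        self._finalizers[category] = callback

    def close(self):
        for callback in self._finalizers.values():
            callback(self)

tr = ToyTransaction()
tr.addfinalize(b'write-revbranchcache', lambda tr: print('cache written once'))
tr.addfinalize(b'write-revbranchcache', lambda tr: print('still written once'))
tr.close()   # prints "still written once" a single time
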
980 def write(self, tr=None):
979 def write(self, tr=None):
981 """Save branch cache if it is dirty."""
980 """Save branch cache if it is dirty."""
982 repo = self._repo
981 repo = self._repo
983 wlock = None
982 wlock = None
984 step = b''
983 step = b''
985 try:
984 try:
986 # write the new names
985 # write the new names
987 if self._rbcnamescount < len(self._names):
986 if self._rbcnamescount < len(self._names):
988 wlock = repo.wlock(wait=False)
987 wlock = repo.wlock(wait=False)
989 step = b' names'
988 step = b' names'
990 self._writenames(repo)
989 self._writenames(repo)
991
990
992 # write the new revs
991 # write the new revs
993 start = self._rbcrevslen * _rbcrecsize
992 start = self._rbcrevslen * _rbcrecsize
994 if start != len(self._rbcrevs):
993 if start != len(self._rbcrevs):
995 step = b''
994 step = b''
996 if wlock is None:
995 if wlock is None:
997 wlock = repo.wlock(wait=False)
996 wlock = repo.wlock(wait=False)
998 self._writerevs(repo, start)
997 self._writerevs(repo, start)
999
998
1000 except (IOError, OSError, error.Abort, error.LockError) as inst:
999 except (IOError, OSError, error.Abort, error.LockError) as inst:
1001 repo.ui.debug(
1000 repo.ui.debug(
1002 b"couldn't write revision branch cache%s: %s\n"
1001 b"couldn't write revision branch cache%s: %s\n"
1003 % (step, stringutil.forcebytestr(inst))
1002 % (step, stringutil.forcebytestr(inst))
1004 )
1003 )
1005 finally:
1004 finally:
1006 if wlock is not None:
1005 if wlock is not None:
1007 wlock.release()
1006 wlock.release()
1008
1007
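write() is deliberately best-effort: it takes the wlock without waiting and treats lock or I/O failures as "skip the cache update", never as a command failure. A reduced sketch of that pattern follows; the exception and helper names are illustrative, not Mercurial's:

class LockHeld(Exception):
    """Stand-in for the error raised when the lock is already taken."""

def write_cache_best_effort(acquire_wlock, write_data):
    lock = None
    try:
        lock = acquire_wlock(wait=False)   # never block the user's command
        write_data()
    except (IOError, OSError, LockHeld):
        pass                               # a stale cache is acceptable
    finally:
        if lock is not None:
            lock.release()
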
1009 def _writenames(self, repo):
1008 def _writenames(self, repo):
1010 """write the new branch names to revbranchcache"""
1009 """write the new branch names to revbranchcache"""
1011 if self._rbcnamescount != 0:
1010 if self._rbcnamescount != 0:
1012 f = repo.cachevfs.open(_rbcnames, b'ab')
1011 f = repo.cachevfs.open(_rbcnames, b'ab')
1013 if f.tell() == self._rbcsnameslen:
1012 if f.tell() == self._rbcsnameslen:
1014 f.write(b'\0')
1013 f.write(b'\0')
1015 else:
1014 else:
1016 f.close()
1015 f.close()
1017 repo.ui.debug(b"%s changed - rewriting it\n" % _rbcnames)
1016 repo.ui.debug(b"%s changed - rewriting it\n" % _rbcnames)
1018 self._rbcnamescount = 0
1017 self._rbcnamescount = 0
1019 self._rbcrevslen = 0
1018 self._rbcrevslen = 0
1020 if self._rbcnamescount == 0:
1019 if self._rbcnamescount == 0:
1021 # before rewriting names, make sure references are removed
1020 # before rewriting names, make sure references are removed
1022 repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True)
1021 repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True)
1023 f = repo.cachevfs.open(_rbcnames, b'wb')
1022 f = repo.cachevfs.open(_rbcnames, b'wb')
1024 f.write(
1023 f.write(
1025 b'\0'.join(
1024 b'\0'.join(
1026 encoding.fromlocal(b)
1025 encoding.fromlocal(b)
1027 for b in self._names[self._rbcnamescount :]
1026 for b in self._names[self._rbcnamescount :]
1028 )
1027 )
1029 )
1028 )
1030 self._rbcsnameslen = f.tell()
1029 self._rbcsnameslen = f.tell()
1031 f.close()
1030 f.close()
1032 self._rbcnamescount = len(self._names)
1031 self._rbcnamescount = len(self._names)
1033
1032
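The rbc-names file written above is an append-only, NUL-separated list of branch names, which is why only the tail of self._names ever needs to be serialized. A small self-contained sketch of that encoding, with helper names invented for the example:

def append_names(existing, new_names):
    """Return the bytes to append for ``new_names`` given the bytes already on disk."""
    chunk = b'\0'.join(new_names)
    return (b'\0' + chunk) if existing else chunk

def parse_names(data):
    return data.split(b'\0') if data else []

blob = append_names(b'', [b'default', b'stable'])
blob += append_names(blob, [b'feature-x'])
assert parse_names(blob) == [b'default', b'stable', b'feature-x']
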
1034 def _writerevs(self, repo, start):
1033 def _writerevs(self, repo, start):
1035 """write the new revs to revbranchcache"""
1034 """write the new revs to revbranchcache"""
1036 revs = min(len(repo.changelog), len(self._rbcrevs) // _rbcrecsize)
1035 revs = min(len(repo.changelog), len(self._rbcrevs) // _rbcrecsize)
1037 with repo.cachevfs.open(_rbcrevs, b'ab') as f:
1036 with repo.cachevfs.open(_rbcrevs, b'ab') as f:
1038 if f.tell() != start:
1037 if f.tell() != start:
1039 repo.ui.debug(
1038 repo.ui.debug(
1040 b"truncating cache/%s to %d\n" % (_rbcrevs, start)
1039 b"truncating cache/%s to %d\n" % (_rbcrevs, start)
1041 )
1040 )
1042 f.seek(start)
1041 f.seek(start)
1043 if f.tell() != start:
1042 if f.tell() != start:
1044 start = 0
1043 start = 0
1045 f.seek(start)
1044 f.seek(start)
1046 f.truncate()
1045 f.truncate()
1047 end = revs * _rbcrecsize
1046 end = revs * _rbcrecsize
1048 f.write(self._rbcrevs.slice(start, end))
1047 f.write(self._rbcrevs.slice(start, end))
1049 self._rbcrevslen = revs
1048 self._rbcrevslen = revs
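_writerevs() only appends the records that are not yet on disk, bounded by the number of changelog revisions. The sketch below, with an assumed record size, shows how the byte range to write can be derived; it is an illustration, not part of the diff:

RECSIZE = 8   # assumed record size, for illustration only

def pending_slice(written_records, total_records, changelog_len):
    usable = min(changelog_len, total_records)   # never write past the changelog
    return written_records * RECSIZE, usable * RECSIZE

print(pending_slice(written_records=10, total_records=15, changelog_len=14))
# -> (80, 112): append records 10 through 13
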
@@ -1,4035 +1,4035 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 # coding: utf-8
2 # coding: utf-8
3 #
3 #
4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9
9
10 import functools
10 import functools
11 import os
11 import os
12 import random
12 import random
13 import re
13 import re
14 import sys
14 import sys
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from concurrent import futures
18 from concurrent import futures
19 from typing import (
19 from typing import (
20 Optional,
20 Optional,
21 )
21 )
22
22
23 from .i18n import _
23 from .i18n import _
24 from .node import (
24 from .node import (
25 bin,
25 bin,
26 hex,
26 hex,
27 nullrev,
27 nullrev,
28 sha1nodeconstants,
28 sha1nodeconstants,
29 short,
29 short,
30 )
30 )
31 from . import (
31 from . import (
32 bookmarks,
32 bookmarks,
33 branchmap,
33 branchmap,
34 bundle2,
34 bundle2,
35 bundlecaches,
35 bundlecaches,
36 changegroup,
36 changegroup,
37 color,
37 color,
38 commit,
38 commit,
39 context,
39 context,
40 dirstate,
40 dirstate,
41 discovery,
41 discovery,
42 encoding,
42 encoding,
43 error,
43 error,
44 exchange,
44 exchange,
45 extensions,
45 extensions,
46 filelog,
46 filelog,
47 hook,
47 hook,
48 lock as lockmod,
48 lock as lockmod,
49 match as matchmod,
49 match as matchmod,
50 mergestate as mergestatemod,
50 mergestate as mergestatemod,
51 mergeutil,
51 mergeutil,
52 namespaces,
52 namespaces,
53 narrowspec,
53 narrowspec,
54 obsolete,
54 obsolete,
55 pathutil,
55 pathutil,
56 phases,
56 phases,
57 policy,
57 policy,
58 pushkey,
58 pushkey,
59 pycompat,
59 pycompat,
60 rcutil,
60 rcutil,
61 repoview,
61 repoview,
62 requirements as requirementsmod,
62 requirements as requirementsmod,
63 revlog,
63 revlog,
64 revset,
64 revset,
65 revsetlang,
65 revsetlang,
66 scmutil,
66 scmutil,
67 sparse,
67 sparse,
68 store as storemod,
68 store as storemod,
69 subrepoutil,
69 subrepoutil,
70 tags as tagsmod,
70 tags as tagsmod,
71 transaction,
71 transaction,
72 txnutil,
72 txnutil,
73 util,
73 util,
74 vfs as vfsmod,
74 vfs as vfsmod,
75 wireprototypes,
75 wireprototypes,
76 )
76 )
77
77
78 from .interfaces import (
78 from .interfaces import (
79 repository,
79 repository,
80 util as interfaceutil,
80 util as interfaceutil,
81 )
81 )
82
82
83 from .utils import (
83 from .utils import (
84 hashutil,
84 hashutil,
85 procutil,
85 procutil,
86 stringutil,
86 stringutil,
87 urlutil,
87 urlutil,
88 )
88 )
89
89
90 from .revlogutils import (
90 from .revlogutils import (
91 concurrency_checker as revlogchecker,
91 concurrency_checker as revlogchecker,
92 constants as revlogconst,
92 constants as revlogconst,
93 sidedata as sidedatamod,
93 sidedata as sidedatamod,
94 )
94 )
95
95
96 release = lockmod.release
96 release = lockmod.release
97 urlerr = util.urlerr
97 urlerr = util.urlerr
98 urlreq = util.urlreq
98 urlreq = util.urlreq
99
99
100 RE_SKIP_DIRSTATE_ROLLBACK = re.compile(
100 RE_SKIP_DIRSTATE_ROLLBACK = re.compile(
101 b"^((dirstate|narrowspec.dirstate).*|branch$)"
101 b"^((dirstate|narrowspec.dirstate).*|branch$)"
102 )
102 )
103
103
104 # set of (path, vfs-location) tuples. vfs-location is:
104 # set of (path, vfs-location) tuples. vfs-location is:
105 # - 'plain' for vfs relative paths
105 # - 'plain' for vfs relative paths
106 # - '' for svfs relative paths
106 # - '' for svfs relative paths
107 _cachedfiles = set()
107 _cachedfiles = set()
108
108
109
109
110 class _basefilecache(scmutil.filecache):
110 class _basefilecache(scmutil.filecache):
111 """All filecache usage on a repo is done for logic that should be unfiltered"""
111 """All filecache usage on a repo is done for logic that should be unfiltered"""
112
112
113 def __get__(self, repo, type=None):
113 def __get__(self, repo, type=None):
114 if repo is None:
114 if repo is None:
115 return self
115 return self
116 # proxy to unfiltered __dict__ since filtered repo has no entry
116 # proxy to unfiltered __dict__ since filtered repo has no entry
117 unfi = repo.unfiltered()
117 unfi = repo.unfiltered()
118 try:
118 try:
119 return unfi.__dict__[self.sname]
119 return unfi.__dict__[self.sname]
120 except KeyError:
120 except KeyError:
121 pass
121 pass
122 return super(_basefilecache, self).__get__(unfi, type)
122 return super(_basefilecache, self).__get__(unfi, type)
123
123
124 def set(self, repo, value):
124 def set(self, repo, value):
125 return super(_basefilecache, self).set(repo.unfiltered(), value)
125 return super(_basefilecache, self).set(repo.unfiltered(), value)
126
126
127
127
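The __get__ and set overrides above make every filtered view read and write the cached value on the unfiltered repository, so a value computed once is visible from all views. A simplified, self-contained sketch of that idea; SharedCacheAttr and Repo are stand-ins, not Mercurial classes:

class SharedCacheAttr:
    def __init__(self, name, compute):
        self.name = name
        self.compute = compute          # hypothetical recompute hook

    def __get__(self, view, owner=None):
        if view is None:
            return self
        target = view.unfiltered()      # always store on the unfiltered object
        if self.name not in vars(target):
            vars(target)[self.name] = self.compute(target)
        return vars(target)[self.name]

class Repo:
    heads = SharedCacheAttr('heads', lambda repo: ['tip'])

    def __init__(self, base=None):
        self._base = base

    def unfiltered(self):
        return self._base if self._base is not None else self

unfi = Repo()
view = Repo(base=unfi)
assert view.heads is unfi.heads        # both views share the cached value
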
128 class repofilecache(_basefilecache):
128 class repofilecache(_basefilecache):
129 """filecache for files in .hg but outside of .hg/store"""
129 """filecache for files in .hg but outside of .hg/store"""
130
130
131 def __init__(self, *paths):
131 def __init__(self, *paths):
132 super(repofilecache, self).__init__(*paths)
132 super(repofilecache, self).__init__(*paths)
133 for path in paths:
133 for path in paths:
134 _cachedfiles.add((path, b'plain'))
134 _cachedfiles.add((path, b'plain'))
135
135
136 def join(self, obj, fname):
136 def join(self, obj, fname):
137 return obj.vfs.join(fname)
137 return obj.vfs.join(fname)
138
138
139
139
140 class storecache(_basefilecache):
140 class storecache(_basefilecache):
141 """filecache for files in the store"""
141 """filecache for files in the store"""
142
142
143 def __init__(self, *paths):
143 def __init__(self, *paths):
144 super(storecache, self).__init__(*paths)
144 super(storecache, self).__init__(*paths)
145 for path in paths:
145 for path in paths:
146 _cachedfiles.add((path, b''))
146 _cachedfiles.add((path, b''))
147
147
148 def join(self, obj, fname):
148 def join(self, obj, fname):
149 return obj.sjoin(fname)
149 return obj.sjoin(fname)
150
150
151
151
152 class changelogcache(storecache):
152 class changelogcache(storecache):
153 """filecache for the changelog"""
153 """filecache for the changelog"""
154
154
155 def __init__(self):
155 def __init__(self):
156 super(changelogcache, self).__init__()
156 super(changelogcache, self).__init__()
157 _cachedfiles.add((b'00changelog.i', b''))
157 _cachedfiles.add((b'00changelog.i', b''))
158 _cachedfiles.add((b'00changelog.n', b''))
158 _cachedfiles.add((b'00changelog.n', b''))
159
159
160 def tracked_paths(self, obj):
160 def tracked_paths(self, obj):
161 paths = [self.join(obj, b'00changelog.i')]
161 paths = [self.join(obj, b'00changelog.i')]
162 if obj.store.opener.options.get(b'persistent-nodemap', False):
162 if obj.store.opener.options.get(b'persistent-nodemap', False):
163 paths.append(self.join(obj, b'00changelog.n'))
163 paths.append(self.join(obj, b'00changelog.n'))
164 return paths
164 return paths
165
165
166
166
167 class manifestlogcache(storecache):
167 class manifestlogcache(storecache):
168 """filecache for the manifestlog"""
168 """filecache for the manifestlog"""
169
169
170 def __init__(self):
170 def __init__(self):
171 super(manifestlogcache, self).__init__()
171 super(manifestlogcache, self).__init__()
172 _cachedfiles.add((b'00manifest.i', b''))
172 _cachedfiles.add((b'00manifest.i', b''))
173 _cachedfiles.add((b'00manifest.n', b''))
173 _cachedfiles.add((b'00manifest.n', b''))
174
174
175 def tracked_paths(self, obj):
175 def tracked_paths(self, obj):
176 paths = [self.join(obj, b'00manifest.i')]
176 paths = [self.join(obj, b'00manifest.i')]
177 if obj.store.opener.options.get(b'persistent-nodemap', False):
177 if obj.store.opener.options.get(b'persistent-nodemap', False):
178 paths.append(self.join(obj, b'00manifest.n'))
178 paths.append(self.join(obj, b'00manifest.n'))
179 return paths
179 return paths
180
180
181
181
182 class mixedrepostorecache(_basefilecache):
182 class mixedrepostorecache(_basefilecache):
183 """filecache for a mix of files in .hg/store and outside"""
183 """filecache for a mix of files in .hg/store and outside"""
184
184
185 def __init__(self, *pathsandlocations):
185 def __init__(self, *pathsandlocations):
186 # scmutil.filecache only uses the path for passing back into our
186 # scmutil.filecache only uses the path for passing back into our
187 # join(), so we can safely pass a list of paths and locations
187 # join(), so we can safely pass a list of paths and locations
188 super(mixedrepostorecache, self).__init__(*pathsandlocations)
188 super(mixedrepostorecache, self).__init__(*pathsandlocations)
189 _cachedfiles.update(pathsandlocations)
189 _cachedfiles.update(pathsandlocations)
190
190
191 def join(self, obj, fnameandlocation):
191 def join(self, obj, fnameandlocation):
192 fname, location = fnameandlocation
192 fname, location = fnameandlocation
193 if location == b'plain':
193 if location == b'plain':
194 return obj.vfs.join(fname)
194 return obj.vfs.join(fname)
195 else:
195 else:
196 if location != b'':
196 if location != b'':
197 raise error.ProgrammingError(
197 raise error.ProgrammingError(
198 b'unexpected location: %s' % location
198 b'unexpected location: %s' % location
199 )
199 )
200 return obj.sjoin(fname)
200 return obj.sjoin(fname)
201
201
202
202
203 def isfilecached(repo, name):
203 def isfilecached(repo, name):
204 """check if a repo has already cached the "name" filecache-ed property
204 """check if a repo has already cached the "name" filecache-ed property
205
205
206 This returns (cachedobj-or-None, iscached) tuple.
206 This returns (cachedobj-or-None, iscached) tuple.
207 """
207 """
208 cacheentry = repo.unfiltered()._filecache.get(name, None)
208 cacheentry = repo.unfiltered()._filecache.get(name, None)
209 if not cacheentry:
209 if not cacheentry:
210 return None, False
210 return None, False
211 return cacheentry.obj, True
211 return cacheentry.obj, True
212
212
213
213
214 class unfilteredpropertycache(util.propertycache):
214 class unfilteredpropertycache(util.propertycache):
215 """propertycache that applies to the unfiltered repo only"""
215 """propertycache that applies to the unfiltered repo only"""
216
216
217 def __get__(self, repo, type=None):
217 def __get__(self, repo, type=None):
218 unfi = repo.unfiltered()
218 unfi = repo.unfiltered()
219 if unfi is repo:
219 if unfi is repo:
220 return super(unfilteredpropertycache, self).__get__(unfi)
220 return super(unfilteredpropertycache, self).__get__(unfi)
221 return getattr(unfi, self.name)
221 return getattr(unfi, self.name)
222
222
223
223
224 class filteredpropertycache(util.propertycache):
224 class filteredpropertycache(util.propertycache):
225 """propertycache that must take filtering into account"""
225 """propertycache that must take filtering into account"""
226
226
227 def cachevalue(self, obj, value):
227 def cachevalue(self, obj, value):
228 object.__setattr__(obj, self.name, value)
228 object.__setattr__(obj, self.name, value)
229
229
230
230
231 def hasunfilteredcache(repo, name):
231 def hasunfilteredcache(repo, name):
232 """check if a repo has an unfilteredpropertycache value for <name>"""
232 """check if a repo has an unfilteredpropertycache value for <name>"""
233 return name in vars(repo.unfiltered())
233 return name in vars(repo.unfiltered())
234
234
235
235
236 def unfilteredmethod(orig):
236 def unfilteredmethod(orig):
237 """decorate a method that always needs to be run on the unfiltered version"""
237 """decorate a method that always needs to be run on the unfiltered version"""
238
238
239 @functools.wraps(orig)
239 @functools.wraps(orig)
240 def wrapper(repo, *args, **kwargs):
240 def wrapper(repo, *args, **kwargs):
241 return orig(repo.unfiltered(), *args, **kwargs)
241 return orig(repo.unfiltered(), *args, **kwargs)
242
242
243 return wrapper
243 return wrapper
244
244
245
245
246 moderncaps = {
246 moderncaps = {
247 b'lookup',
247 b'lookup',
248 b'branchmap',
248 b'branchmap',
249 b'pushkey',
249 b'pushkey',
250 b'known',
250 b'known',
251 b'getbundle',
251 b'getbundle',
252 b'unbundle',
252 b'unbundle',
253 }
253 }
254 legacycaps = moderncaps.union({b'changegroupsubset'})
254 legacycaps = moderncaps.union({b'changegroupsubset'})
255
255
256
256
257 @interfaceutil.implementer(repository.ipeercommandexecutor)
257 @interfaceutil.implementer(repository.ipeercommandexecutor)
258 class localcommandexecutor:
258 class localcommandexecutor:
259 def __init__(self, peer):
259 def __init__(self, peer):
260 self._peer = peer
260 self._peer = peer
261 self._sent = False
261 self._sent = False
262 self._closed = False
262 self._closed = False
263
263
264 def __enter__(self):
264 def __enter__(self):
265 return self
265 return self
266
266
267 def __exit__(self, exctype, excvalue, exctb):
267 def __exit__(self, exctype, excvalue, exctb):
268 self.close()
268 self.close()
269
269
270 def callcommand(self, command, args):
270 def callcommand(self, command, args):
271 if self._sent:
271 if self._sent:
272 raise error.ProgrammingError(
272 raise error.ProgrammingError(
273 b'callcommand() cannot be used after sendcommands()'
273 b'callcommand() cannot be used after sendcommands()'
274 )
274 )
275
275
276 if self._closed:
276 if self._closed:
277 raise error.ProgrammingError(
277 raise error.ProgrammingError(
278 b'callcommand() cannot be used after close()'
278 b'callcommand() cannot be used after close()'
279 )
279 )
280
280
281 # We don't need to support anything fancy. Just call the named
281 # We don't need to support anything fancy. Just call the named
282 # method on the peer and return a resolved future.
282 # method on the peer and return a resolved future.
283 fn = getattr(self._peer, pycompat.sysstr(command))
283 fn = getattr(self._peer, pycompat.sysstr(command))
284
284
285 f = futures.Future()
285 f = futures.Future()
286
286
287 try:
287 try:
288 result = fn(**pycompat.strkwargs(args))
288 result = fn(**pycompat.strkwargs(args))
289 except Exception:
289 except Exception:
290 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
290 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
291 else:
291 else:
292 f.set_result(result)
292 f.set_result(result)
293
293
294 return f
294 return f
295
295
296 def sendcommands(self):
296 def sendcommands(self):
297 self._sent = True
297 self._sent = True
298
298
299 def close(self):
299 def close(self):
300 self._closed = True
300 self._closed = True
301
301
302
302
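A caller typically drives this executor through the context-manager protocol and resolves the future after sendcommands(). The sketch below is a usage illustration only; EchoPeer and run_known are invented stand-ins and the executor class is passed in rather than imported:

class EchoPeer:
    def known(self, nodes):
        return [True for _ in nodes]

def run_known(executor_factory, peer, nodes):
    with executor_factory(peer) as executor:
        fut = executor.callcommand(b'known', {b'nodes': nodes})
        executor.sendcommands()      # required by the interface, a no-op locally
    return fut.result()
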
303 @interfaceutil.implementer(repository.ipeercommands)
303 @interfaceutil.implementer(repository.ipeercommands)
304 class localpeer(repository.peer):
304 class localpeer(repository.peer):
305 '''peer for a local repo; reflects only the most recent API'''
305 '''peer for a local repo; reflects only the most recent API'''
306
306
307 def __init__(self, repo, caps=None, path=None, remotehidden=False):
307 def __init__(self, repo, caps=None, path=None, remotehidden=False):
308 super(localpeer, self).__init__(
308 super(localpeer, self).__init__(
309 repo.ui, path=path, remotehidden=remotehidden
309 repo.ui, path=path, remotehidden=remotehidden
310 )
310 )
311
311
312 if caps is None:
312 if caps is None:
313 caps = moderncaps.copy()
313 caps = moderncaps.copy()
314 if remotehidden:
314 if remotehidden:
315 self._repo = repo.filtered(b'served.hidden')
315 self._repo = repo.filtered(b'served.hidden')
316 else:
316 else:
317 self._repo = repo.filtered(b'served')
317 self._repo = repo.filtered(b'served')
318 if repo._wanted_sidedata:
318 if repo._wanted_sidedata:
319 formatted = bundle2.format_remote_wanted_sidedata(repo)
319 formatted = bundle2.format_remote_wanted_sidedata(repo)
320 caps.add(b'exp-wanted-sidedata=' + formatted)
320 caps.add(b'exp-wanted-sidedata=' + formatted)
321
321
322 self._caps = repo._restrictcapabilities(caps)
322 self._caps = repo._restrictcapabilities(caps)
323
323
324 # Begin of _basepeer interface.
324 # Begin of _basepeer interface.
325
325
326 def url(self):
326 def url(self):
327 return self._repo.url()
327 return self._repo.url()
328
328
329 def local(self):
329 def local(self):
330 return self._repo
330 return self._repo
331
331
332 def canpush(self):
332 def canpush(self):
333 return True
333 return True
334
334
335 def close(self):
335 def close(self):
336 self._repo.close()
336 self._repo.close()
337
337
338 # End of _basepeer interface.
338 # End of _basepeer interface.
339
339
340 # Begin of _basewirecommands interface.
340 # Begin of _basewirecommands interface.
341
341
342 def branchmap(self):
342 def branchmap(self):
343 return self._repo.branchmap()
343 return self._repo.branchmap()
344
344
345 def capabilities(self):
345 def capabilities(self):
346 return self._caps
346 return self._caps
347
347
348 def get_cached_bundle_inline(self, path):
348 def get_cached_bundle_inline(self, path):
349 # not needed with local peer
349 # not needed with local peer
350 raise NotImplementedError
350 raise NotImplementedError
351
351
352 def clonebundles(self):
352 def clonebundles(self):
353 return bundlecaches.get_manifest(self._repo)
353 return bundlecaches.get_manifest(self._repo)
354
354
355 def debugwireargs(self, one, two, three=None, four=None, five=None):
355 def debugwireargs(self, one, two, three=None, four=None, five=None):
356 """Used to test argument passing over the wire"""
356 """Used to test argument passing over the wire"""
357 return b"%s %s %s %s %s" % (
357 return b"%s %s %s %s %s" % (
358 one,
358 one,
359 two,
359 two,
360 pycompat.bytestr(three),
360 pycompat.bytestr(three),
361 pycompat.bytestr(four),
361 pycompat.bytestr(four),
362 pycompat.bytestr(five),
362 pycompat.bytestr(five),
363 )
363 )
364
364
365 def getbundle(
365 def getbundle(
366 self,
366 self,
367 source,
367 source,
368 heads=None,
368 heads=None,
369 common=None,
369 common=None,
370 bundlecaps=None,
370 bundlecaps=None,
371 remote_sidedata=None,
371 remote_sidedata=None,
372 **kwargs,
372 **kwargs,
373 ):
373 ):
374 chunks = exchange.getbundlechunks(
374 chunks = exchange.getbundlechunks(
375 self._repo,
375 self._repo,
376 source,
376 source,
377 heads=heads,
377 heads=heads,
378 common=common,
378 common=common,
379 bundlecaps=bundlecaps,
379 bundlecaps=bundlecaps,
380 remote_sidedata=remote_sidedata,
380 remote_sidedata=remote_sidedata,
381 **kwargs,
381 **kwargs,
382 )[1]
382 )[1]
383 cb = util.chunkbuffer(chunks)
383 cb = util.chunkbuffer(chunks)
384
384
385 if exchange.bundle2requested(bundlecaps):
385 if exchange.bundle2requested(bundlecaps):
386 # When requesting a bundle2, getbundle returns a stream to make the
386 # When requesting a bundle2, getbundle returns a stream to make the
387 # wire level function happier. We need to build a proper object
387 # wire level function happier. We need to build a proper object
388 # from it in local peer.
388 # from it in local peer.
389 return bundle2.getunbundler(self.ui, cb)
389 return bundle2.getunbundler(self.ui, cb)
390 else:
390 else:
391 return changegroup.getunbundler(b'01', cb, None)
391 return changegroup.getunbundler(b'01', cb, None)
392
392
393 def heads(self):
393 def heads(self):
394 return self._repo.heads()
394 return self._repo.heads()
395
395
396 def known(self, nodes):
396 def known(self, nodes):
397 return self._repo.known(nodes)
397 return self._repo.known(nodes)
398
398
399 def listkeys(self, namespace):
399 def listkeys(self, namespace):
400 return self._repo.listkeys(namespace)
400 return self._repo.listkeys(namespace)
401
401
402 def lookup(self, key):
402 def lookup(self, key):
403 return self._repo.lookup(key)
403 return self._repo.lookup(key)
404
404
405 def pushkey(self, namespace, key, old, new):
405 def pushkey(self, namespace, key, old, new):
406 return self._repo.pushkey(namespace, key, old, new)
406 return self._repo.pushkey(namespace, key, old, new)
407
407
408 def stream_out(self):
408 def stream_out(self):
409 raise error.Abort(_(b'cannot perform stream clone against local peer'))
409 raise error.Abort(_(b'cannot perform stream clone against local peer'))
410
410
411 def unbundle(self, bundle, heads, url):
411 def unbundle(self, bundle, heads, url):
412 """apply a bundle on a repo
412 """apply a bundle on a repo
413
413
414 This function handles the repo locking itself."""
414 This function handles the repo locking itself."""
415 try:
415 try:
416 try:
416 try:
417 bundle = exchange.readbundle(self.ui, bundle, None)
417 bundle = exchange.readbundle(self.ui, bundle, None)
418 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
418 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
419 if hasattr(ret, 'getchunks'):
419 if hasattr(ret, 'getchunks'):
420 # This is a bundle20 object, turn it into an unbundler.
420 # This is a bundle20 object, turn it into an unbundler.
421 # This little dance should be dropped eventually when the
421 # This little dance should be dropped eventually when the
422 # API is finally improved.
422 # API is finally improved.
423 stream = util.chunkbuffer(ret.getchunks())
423 stream = util.chunkbuffer(ret.getchunks())
424 ret = bundle2.getunbundler(self.ui, stream)
424 ret = bundle2.getunbundler(self.ui, stream)
425 return ret
425 return ret
426 except Exception as exc:
426 except Exception as exc:
427 # If the exception contains output salvaged from a bundle2
427 # If the exception contains output salvaged from a bundle2
428 # reply, we need to make sure it is printed before continuing
428 # reply, we need to make sure it is printed before continuing
429 # to fail. So we build a bundle2 with such output and consume
429 # to fail. So we build a bundle2 with such output and consume
430 # it directly.
430 # it directly.
431 #
431 #
432 # This is not very elegant but allows a "simple" solution for
432 # This is not very elegant but allows a "simple" solution for
433 # issue4594
433 # issue4594
434 output = getattr(exc, '_bundle2salvagedoutput', ())
434 output = getattr(exc, '_bundle2salvagedoutput', ())
435 if output:
435 if output:
436 bundler = bundle2.bundle20(self._repo.ui)
436 bundler = bundle2.bundle20(self._repo.ui)
437 for out in output:
437 for out in output:
438 bundler.addpart(out)
438 bundler.addpart(out)
439 stream = util.chunkbuffer(bundler.getchunks())
439 stream = util.chunkbuffer(bundler.getchunks())
440 b = bundle2.getunbundler(self.ui, stream)
440 b = bundle2.getunbundler(self.ui, stream)
441 bundle2.processbundle(self._repo, b)
441 bundle2.processbundle(self._repo, b)
442 raise
442 raise
443 except error.PushRaced as exc:
443 except error.PushRaced as exc:
444 raise error.ResponseError(
444 raise error.ResponseError(
445 _(b'push failed:'), stringutil.forcebytestr(exc)
445 _(b'push failed:'), stringutil.forcebytestr(exc)
446 )
446 )
447
447
448 # End of _basewirecommands interface.
448 # End of _basewirecommands interface.
449
449
450 # Begin of peer interface.
450 # Begin of peer interface.
451
451
452 def commandexecutor(self):
452 def commandexecutor(self):
453 return localcommandexecutor(self)
453 return localcommandexecutor(self)
454
454
455 # End of peer interface.
455 # End of peer interface.
456
456
457
457
458 @interfaceutil.implementer(repository.ipeerlegacycommands)
458 @interfaceutil.implementer(repository.ipeerlegacycommands)
459 class locallegacypeer(localpeer):
459 class locallegacypeer(localpeer):
460 """peer extension which implements legacy methods too; used for tests with
460 """peer extension which implements legacy methods too; used for tests with
461 restricted capabilities"""
461 restricted capabilities"""
462
462
463 def __init__(self, repo, path=None, remotehidden=False):
463 def __init__(self, repo, path=None, remotehidden=False):
464 super(locallegacypeer, self).__init__(
464 super(locallegacypeer, self).__init__(
465 repo, caps=legacycaps, path=path, remotehidden=remotehidden
465 repo, caps=legacycaps, path=path, remotehidden=remotehidden
466 )
466 )
467
467
468 # Begin of baselegacywirecommands interface.
468 # Begin of baselegacywirecommands interface.
469
469
470 def between(self, pairs):
470 def between(self, pairs):
471 return self._repo.between(pairs)
471 return self._repo.between(pairs)
472
472
473 def branches(self, nodes):
473 def branches(self, nodes):
474 return self._repo.branches(nodes)
474 return self._repo.branches(nodes)
475
475
476 def changegroup(self, nodes, source):
476 def changegroup(self, nodes, source):
477 outgoing = discovery.outgoing(
477 outgoing = discovery.outgoing(
478 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
478 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
479 )
479 )
480 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
480 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
481
481
482 def changegroupsubset(self, bases, heads, source):
482 def changegroupsubset(self, bases, heads, source):
483 outgoing = discovery.outgoing(
483 outgoing = discovery.outgoing(
484 self._repo, missingroots=bases, ancestorsof=heads
484 self._repo, missingroots=bases, ancestorsof=heads
485 )
485 )
486 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
486 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
487
487
488 # End of baselegacywirecommands interface.
488 # End of baselegacywirecommands interface.
489
489
490
490
491 # Functions receiving (ui, features) that extensions can register to impact
491 # Functions receiving (ui, features) that extensions can register to impact
492 # the ability to load repositories with custom requirements. Only
492 # the ability to load repositories with custom requirements. Only
493 # functions defined in loaded extensions are called.
493 # functions defined in loaded extensions are called.
494 #
494 #
495 # The function receives a set of requirement strings that the repository
495 # The function receives a set of requirement strings that the repository
496 # is capable of opening. Functions will typically add elements to the
496 # is capable of opening. Functions will typically add elements to the
497 # set to reflect that the extension knows how to handle those requirements.
497 # set to reflect that the extension knows how to handle those requirements.
498 featuresetupfuncs = set()
498 featuresetupfuncs = set()
499
499
500
500
501 def _getsharedvfs(hgvfs, requirements):
501 def _getsharedvfs(hgvfs, requirements):
502 """returns the vfs object pointing to the root of the shared source
502 """returns the vfs object pointing to the root of the shared source
503 repo for a shared repository
503 repo for a shared repository
504
504
505 hgvfs is vfs pointing at .hg/ of current repo (shared one)
505 hgvfs is vfs pointing at .hg/ of current repo (shared one)
506 requirements is a set of requirements of current repo (shared one)
506 requirements is a set of requirements of current repo (shared one)
507 """
507 """
508 # The ``shared`` or ``relshared`` requirements indicate the
508 # The ``shared`` or ``relshared`` requirements indicate the
509 # store lives in the path contained in the ``.hg/sharedpath`` file.
509 # store lives in the path contained in the ``.hg/sharedpath`` file.
510 # This is an absolute path for ``shared`` and relative to
510 # This is an absolute path for ``shared`` and relative to
511 # ``.hg/`` for ``relshared``.
511 # ``.hg/`` for ``relshared``.
512 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
512 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
513 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
513 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
514 sharedpath = util.normpath(hgvfs.join(sharedpath))
514 sharedpath = util.normpath(hgvfs.join(sharedpath))
515
515
516 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
516 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
517
517
518 if not sharedvfs.exists():
518 if not sharedvfs.exists():
519 raise error.RepoError(
519 raise error.RepoError(
520 _(b'.hg/sharedpath points to nonexistent directory %s')
520 _(b'.hg/sharedpath points to nonexistent directory %s')
521 % sharedvfs.base
521 % sharedvfs.base
522 )
522 )
523 return sharedvfs
523 return sharedvfs
524
524
525
525
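The sharedpath value read above is either absolute (shared) or relative to .hg/ (relshared). A tiny illustration of that resolution with plain os.path; the paths are made-up POSIX examples, not anything read from a real repository:

import os

def resolve_sharedpath(hgdir, sharedpath, relative):
    target = os.path.join(hgdir, sharedpath) if relative else sharedpath
    return os.path.normpath(target)

print(resolve_sharedpath('/repo/.hg', '../other/.hg', relative=True))
# -> /repo/other/.hg on a POSIX system
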
526 def _readrequires(vfs, allowmissing):
526 def _readrequires(vfs, allowmissing):
527 """reads the requires file present at the root of this vfs
527 """reads the requires file present at the root of this vfs
528 and returns a set of requirements
528 and returns a set of requirements
529
529
530 If allowmissing is True, we suppress FileNotFoundError if raised"""
530 If allowmissing is True, we suppress FileNotFoundError if raised"""
531 # requires file contains a newline-delimited list of
531 # requires file contains a newline-delimited list of
532 # features/capabilities the opener (us) must have in order to use
532 # features/capabilities the opener (us) must have in order to use
533 # the repository. This file was introduced in Mercurial 0.9.2,
533 # the repository. This file was introduced in Mercurial 0.9.2,
534 # which means very old repositories may not have one. We assume
534 # which means very old repositories may not have one. We assume
535 # a missing file translates to no requirements.
535 # a missing file translates to no requirements.
536 read = vfs.tryread if allowmissing else vfs.read
536 read = vfs.tryread if allowmissing else vfs.read
537 return set(read(b'requires').splitlines())
537 return set(read(b'requires').splitlines())
538
538
539
539
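The requirements set returned here is later compared against the set of requirements this client supports (see ensurerequirementsrecognized further down). A hedged sketch of that kind of check, with an invented helper name and made-up requirement strings:

def check_supported(requirements, supported):
    missing = requirements - supported
    if missing:
        raise RuntimeError(
            'repository requires features unknown to this client: %s'
            % ', '.join(sorted(m.decode() for m in missing))
        )

check_supported({b'revlogv1', b'store'}, {b'revlogv1', b'store', b'fncache'})
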
540 def makelocalrepository(baseui, path: bytes, intents=None):
540 def makelocalrepository(baseui, path: bytes, intents=None):
541 """Create a local repository object.
541 """Create a local repository object.
542
542
543 Given arguments needed to construct a local repository, this function
543 Given arguments needed to construct a local repository, this function
544 performs various early repository loading functionality (such as
544 performs various early repository loading functionality (such as
545 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
545 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
546 the repository can be opened, derives a type suitable for representing
546 the repository can be opened, derives a type suitable for representing
547 that repository, and returns an instance of it.
547 that repository, and returns an instance of it.
548
548
549 The returned object conforms to the ``repository.completelocalrepository``
549 The returned object conforms to the ``repository.completelocalrepository``
550 interface.
550 interface.
551
551
552 The repository type is derived by calling a series of factory functions
552 The repository type is derived by calling a series of factory functions
553 for each aspect/interface of the final repository. These are defined by
553 for each aspect/interface of the final repository. These are defined by
554 ``REPO_INTERFACES``.
554 ``REPO_INTERFACES``.
555
555
556 Each factory function is called to produce a type implementing a specific
556 Each factory function is called to produce a type implementing a specific
557 interface. The cumulative list of returned types will be combined into a
557 interface. The cumulative list of returned types will be combined into a
558 new type and that type will be instantiated to represent the local
558 new type and that type will be instantiated to represent the local
559 repository.
559 repository.
560
560
561 The factory functions each receive various state that may be consulted
561 The factory functions each receive various state that may be consulted
562 as part of deriving a type.
562 as part of deriving a type.
563
563
564 Extensions should wrap these factory functions to customize repository type
564 Extensions should wrap these factory functions to customize repository type
565 creation. Note that an extension's wrapped function may be called even if
565 creation. Note that an extension's wrapped function may be called even if
566 that extension is not loaded for the repo being constructed. Extensions
566 that extension is not loaded for the repo being constructed. Extensions
567 should check if their ``__name__`` appears in the
567 should check if their ``__name__`` appears in the
568 ``extensionmodulenames`` set passed to the factory function and no-op if
568 ``extensionmodulenames`` set passed to the factory function and no-op if
569 not.
569 not.
570 """
570 """
571 ui = baseui.copy()
571 ui = baseui.copy()
572 # Prevent copying repo configuration.
572 # Prevent copying repo configuration.
573 ui.copy = baseui.copy
573 ui.copy = baseui.copy
574
574
575 # Working directory VFS rooted at repository root.
575 # Working directory VFS rooted at repository root.
576 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
576 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
577
577
578 # Main VFS for .hg/ directory.
578 # Main VFS for .hg/ directory.
579 hgpath = wdirvfs.join(b'.hg')
579 hgpath = wdirvfs.join(b'.hg')
580 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
580 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
581 # Whether this repository is a shared one or not
581 # Whether this repository is a shared one or not
582 shared = False
582 shared = False
583 # If this repository is shared, vfs pointing to shared repo
583 # If this repository is shared, vfs pointing to shared repo
584 sharedvfs = None
584 sharedvfs = None
585
585
586 # The .hg/ path should exist and should be a directory. All other
586 # The .hg/ path should exist and should be a directory. All other
587 # cases are errors.
587 # cases are errors.
588 if not hgvfs.isdir():
588 if not hgvfs.isdir():
589 try:
589 try:
590 hgvfs.stat()
590 hgvfs.stat()
591 except FileNotFoundError:
591 except FileNotFoundError:
592 pass
592 pass
593 except ValueError as e:
593 except ValueError as e:
594 # Can be raised on Python 3.8 when path is invalid.
594 # Can be raised on Python 3.8 when path is invalid.
595 raise error.Abort(
595 raise error.Abort(
596 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
596 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
597 )
597 )
598
598
599 raise error.RepoError(_(b'repository %s not found') % path)
599 raise error.RepoError(_(b'repository %s not found') % path)
600
600
601 requirements = _readrequires(hgvfs, True)
601 requirements = _readrequires(hgvfs, True)
602 shared = (
602 shared = (
603 requirementsmod.SHARED_REQUIREMENT in requirements
603 requirementsmod.SHARED_REQUIREMENT in requirements
604 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
604 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
605 )
605 )
606 storevfs = None
606 storevfs = None
607 if shared:
607 if shared:
608 # This is a shared repo
608 # This is a shared repo
609 sharedvfs = _getsharedvfs(hgvfs, requirements)
609 sharedvfs = _getsharedvfs(hgvfs, requirements)
610 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
610 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
611 else:
611 else:
612 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
612 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
613
613
614 # if .hg/requires contains the sharesafe requirement, it means
614 # if .hg/requires contains the sharesafe requirement, it means
615 # there exists a `.hg/store/requires` too and we should read it
615 # there exists a `.hg/store/requires` too and we should read it
616 # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store requirement
616 # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store requirement
617 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
617 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
618 # is not present; refer to checkrequirementscompat() for that
618 # is not present; refer to checkrequirementscompat() for that
619 #
619 #
620 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
620 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
621 # repository was shared the old way. We check the share source .hg/requires
621 # repository was shared the old way. We check the share source .hg/requires
622 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
622 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
623 # to be reshared
623 # to be reshared
624 hint = _(b"see `hg help config.format.use-share-safe` for more information")
624 hint = _(b"see `hg help config.format.use-share-safe` for more information")
625 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
625 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
626 if (
626 if (
627 shared
627 shared
628 and requirementsmod.SHARESAFE_REQUIREMENT
628 and requirementsmod.SHARESAFE_REQUIREMENT
629 not in _readrequires(sharedvfs, True)
629 not in _readrequires(sharedvfs, True)
630 ):
630 ):
631 mismatch_warn = ui.configbool(
631 mismatch_warn = ui.configbool(
632 b'share', b'safe-mismatch.source-not-safe.warn'
632 b'share', b'safe-mismatch.source-not-safe.warn'
633 )
633 )
634 mismatch_config = ui.config(
634 mismatch_config = ui.config(
635 b'share', b'safe-mismatch.source-not-safe'
635 b'share', b'safe-mismatch.source-not-safe'
636 )
636 )
637 mismatch_verbose_upgrade = ui.configbool(
637 mismatch_verbose_upgrade = ui.configbool(
638 b'share', b'safe-mismatch.source-not-safe:verbose-upgrade'
638 b'share', b'safe-mismatch.source-not-safe:verbose-upgrade'
639 )
639 )
640 if mismatch_config in (
640 if mismatch_config in (
641 b'downgrade-allow',
641 b'downgrade-allow',
642 b'allow',
642 b'allow',
643 b'downgrade-abort',
643 b'downgrade-abort',
644 ):
644 ):
645 # prevent cyclic import localrepo -> upgrade -> localrepo
645 # prevent cyclic import localrepo -> upgrade -> localrepo
646 from . import upgrade
646 from . import upgrade
647
647
648 upgrade.downgrade_share_to_non_safe(
648 upgrade.downgrade_share_to_non_safe(
649 ui,
649 ui,
650 hgvfs,
650 hgvfs,
651 sharedvfs,
651 sharedvfs,
652 requirements,
652 requirements,
653 mismatch_config,
653 mismatch_config,
654 mismatch_warn,
654 mismatch_warn,
655 mismatch_verbose_upgrade,
655 mismatch_verbose_upgrade,
656 )
656 )
657 elif mismatch_config == b'abort':
657 elif mismatch_config == b'abort':
658 raise error.Abort(
658 raise error.Abort(
659 _(b"share source does not support share-safe requirement"),
659 _(b"share source does not support share-safe requirement"),
660 hint=hint,
660 hint=hint,
661 )
661 )
662 else:
662 else:
663 raise error.Abort(
663 raise error.Abort(
664 _(
664 _(
665 b"share-safe mismatch with source.\nUnrecognized"
665 b"share-safe mismatch with source.\nUnrecognized"
666 b" value '%s' of `share.safe-mismatch.source-not-safe`"
666 b" value '%s' of `share.safe-mismatch.source-not-safe`"
667 b" set."
667 b" set."
668 )
668 )
669 % mismatch_config,
669 % mismatch_config,
670 hint=hint,
670 hint=hint,
671 )
671 )
672 else:
672 else:
673 requirements |= _readrequires(storevfs, False)
673 requirements |= _readrequires(storevfs, False)
674 elif shared:
674 elif shared:
675 sourcerequires = _readrequires(sharedvfs, False)
675 sourcerequires = _readrequires(sharedvfs, False)
676 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
676 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
677 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
677 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
678 mismatch_warn = ui.configbool(
678 mismatch_warn = ui.configbool(
679 b'share', b'safe-mismatch.source-safe.warn'
679 b'share', b'safe-mismatch.source-safe.warn'
680 )
680 )
681 mismatch_verbose_upgrade = ui.configbool(
681 mismatch_verbose_upgrade = ui.configbool(
682 b'share', b'safe-mismatch.source-safe:verbose-upgrade'
682 b'share', b'safe-mismatch.source-safe:verbose-upgrade'
683 )
683 )
684 if mismatch_config in (
684 if mismatch_config in (
685 b'upgrade-allow',
685 b'upgrade-allow',
686 b'allow',
686 b'allow',
687 b'upgrade-abort',
687 b'upgrade-abort',
688 ):
688 ):
689 # prevent cyclic import localrepo -> upgrade -> localrepo
689 # prevent cyclic import localrepo -> upgrade -> localrepo
690 from . import upgrade
690 from . import upgrade
691
691
692 upgrade.upgrade_share_to_safe(
692 upgrade.upgrade_share_to_safe(
693 ui,
693 ui,
694 hgvfs,
694 hgvfs,
695 storevfs,
695 storevfs,
696 requirements,
696 requirements,
697 mismatch_config,
697 mismatch_config,
698 mismatch_warn,
698 mismatch_warn,
699 mismatch_verbose_upgrade,
699 mismatch_verbose_upgrade,
700 )
700 )
701 elif mismatch_config == b'abort':
701 elif mismatch_config == b'abort':
702 raise error.Abort(
702 raise error.Abort(
703 _(
703 _(
704 b'version mismatch: source uses share-safe'
704 b'version mismatch: source uses share-safe'
705 b' functionality while the current share does not'
705 b' functionality while the current share does not'
706 ),
706 ),
707 hint=hint,
707 hint=hint,
708 )
708 )
709 else:
709 else:
710 raise error.Abort(
710 raise error.Abort(
711 _(
711 _(
712 b"share-safe mismatch with source.\nUnrecognized"
712 b"share-safe mismatch with source.\nUnrecognized"
713 b" value '%s' of `share.safe-mismatch.source-safe` set."
713 b" value '%s' of `share.safe-mismatch.source-safe` set."
714 )
714 )
715 % mismatch_config,
715 % mismatch_config,
716 hint=hint,
716 hint=hint,
717 )
717 )
718
718
719 # The .hg/hgrc file may load extensions or contain config options
719 # The .hg/hgrc file may load extensions or contain config options
720 # that influence repository construction. Attempt to load it and
720 # that influence repository construction. Attempt to load it and
721 # process any new extensions that it may have pulled in.
721 # process any new extensions that it may have pulled in.
722 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
722 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
723 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
723 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
724 extensions.loadall(ui)
724 extensions.loadall(ui)
725 extensions.populateui(ui)
725 extensions.populateui(ui)
726
726
727 # Set of module names of extensions loaded for this repository.
727 # Set of module names of extensions loaded for this repository.
728 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
728 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
729
729
730 supportedrequirements = gathersupportedrequirements(ui)
730 supportedrequirements = gathersupportedrequirements(ui)
731
731
732 # We first validate the requirements are known.
732 # We first validate the requirements are known.
733 ensurerequirementsrecognized(requirements, supportedrequirements)
733 ensurerequirementsrecognized(requirements, supportedrequirements)
734
734
735 # Then we validate that the known set is reasonable to use together.
735 # Then we validate that the known set is reasonable to use together.
736 ensurerequirementscompatible(ui, requirements)
736 ensurerequirementscompatible(ui, requirements)
737
737
738 # TODO there are unhandled edge cases related to opening repositories with
738 # TODO there are unhandled edge cases related to opening repositories with
739 # shared storage. If storage is shared, we should also test for requirements
739 # shared storage. If storage is shared, we should also test for requirements
740 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
740 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
741 # that repo, as that repo may load extensions needed to open it. This is a
741 # that repo, as that repo may load extensions needed to open it. This is a
742 # bit complicated because we don't want the other hgrc to overwrite settings
742 # bit complicated because we don't want the other hgrc to overwrite settings
743 # in this hgrc.
743 # in this hgrc.
744 #
744 #
745 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
745 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
746 # file when sharing repos. But if a requirement is added after the share is
746 # file when sharing repos. But if a requirement is added after the share is
747 # performed, thereby introducing a new requirement for the opener, we may
747 # performed, thereby introducing a new requirement for the opener, we may
748 # not see that and could encounter a run-time error interacting with
748 # not see that and could encounter a run-time error interacting with
749 # that shared store since it has an unknown-to-us requirement.
749 # that shared store since it has an unknown-to-us requirement.
750
750
751 # At this point, we know we should be capable of opening the repository.
751 # At this point, we know we should be capable of opening the repository.
752 # Now get on with doing that.
752 # Now get on with doing that.
753
753
754 features = set()
754 features = set()
755
755
756 # The "store" part of the repository holds versioned data. How it is
756 # The "store" part of the repository holds versioned data. How it is
757 # accessed is determined by various requirements. If `shared` or
757 # accessed is determined by various requirements. If `shared` or
758 # `relshared` requirements are present, this indicates the current repository
758 # `relshared` requirements are present, this indicates the current repository
759 # is a share and the store exists in the path mentioned in `.hg/sharedpath`
759 # is a share and the store exists in the path mentioned in `.hg/sharedpath`
760 if shared:
760 if shared:
761 storebasepath = sharedvfs.base
761 storebasepath = sharedvfs.base
762 cachepath = sharedvfs.join(b'cache')
762 cachepath = sharedvfs.join(b'cache')
763 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
763 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
764 else:
764 else:
765 storebasepath = hgvfs.base
765 storebasepath = hgvfs.base
766 cachepath = hgvfs.join(b'cache')
766 cachepath = hgvfs.join(b'cache')
767 wcachepath = hgvfs.join(b'wcache')
767 wcachepath = hgvfs.join(b'wcache')
768
768
769 # The store has changed over time and the exact layout is dictated by
769 # The store has changed over time and the exact layout is dictated by
770 # requirements. The store interface abstracts differences across all
770 # requirements. The store interface abstracts differences across all
771 # of them.
771 # of them.
772 store = makestore(
772 store = makestore(
773 requirements,
773 requirements,
774 storebasepath,
774 storebasepath,
775 lambda base: vfsmod.vfs(base, cacheaudited=True),
775 lambda base: vfsmod.vfs(base, cacheaudited=True),
776 )
776 )
777 hgvfs.createmode = store.createmode
777 hgvfs.createmode = store.createmode
778
778
779 storevfs = store.vfs
779 storevfs = store.vfs
780 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
780 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
781
781
782 if (
782 if (
783 requirementsmod.REVLOGV2_REQUIREMENT in requirements
783 requirementsmod.REVLOGV2_REQUIREMENT in requirements
784 or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
784 or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
785 ):
785 ):
786 features.add(repository.REPO_FEATURE_SIDE_DATA)
786 features.add(repository.REPO_FEATURE_SIDE_DATA)
787 # the revlogv2 docket introduced a race condition that we need to fix
787 # the revlogv2 docket introduced a race condition that we need to fix
788 features.discard(repository.REPO_FEATURE_STREAM_CLONE)
788 features.discard(repository.REPO_FEATURE_STREAM_CLONE)
789
789
790 # The cache vfs is used to manage cache files.
790 # The cache vfs is used to manage cache files.
791 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
791 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
792 cachevfs.createmode = store.createmode
792 cachevfs.createmode = store.createmode
793 # The cache vfs is used to manage cache files related to the working copy
793 # The cache vfs is used to manage cache files related to the working copy
794 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
794 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
795 wcachevfs.createmode = store.createmode
795 wcachevfs.createmode = store.createmode
796
796
797 # Now resolve the type for the repository object. We do this by repeatedly
797 # Now resolve the type for the repository object. We do this by repeatedly
798 # calling a factory function to produce types for specific aspects of the
798 # calling a factory function to produce types for specific aspects of the
799 # repo's operation. The aggregate returned types are used as base classes
799 # repo's operation. The aggregate returned types are used as base classes
800 # for a dynamically-derived type, which will represent our new repository.
800 # for a dynamically-derived type, which will represent our new repository.
801
801
802 bases = []
802 bases = []
803 extrastate = {}
803 extrastate = {}
804
804
805 for iface, fn in REPO_INTERFACES:
805 for iface, fn in REPO_INTERFACES:
806 # We pass all potentially useful state to give extensions tons of
806 # We pass all potentially useful state to give extensions tons of
807 # flexibility.
807 # flexibility.
808 typ = fn()(
808 typ = fn()(
809 ui=ui,
809 ui=ui,
810 intents=intents,
810 intents=intents,
811 requirements=requirements,
811 requirements=requirements,
812 features=features,
812 features=features,
813 wdirvfs=wdirvfs,
813 wdirvfs=wdirvfs,
814 hgvfs=hgvfs,
814 hgvfs=hgvfs,
815 store=store,
815 store=store,
816 storevfs=storevfs,
816 storevfs=storevfs,
817 storeoptions=storevfs.options,
817 storeoptions=storevfs.options,
818 cachevfs=cachevfs,
818 cachevfs=cachevfs,
819 wcachevfs=wcachevfs,
819 wcachevfs=wcachevfs,
820 extensionmodulenames=extensionmodulenames,
820 extensionmodulenames=extensionmodulenames,
821 extrastate=extrastate,
821 extrastate=extrastate,
822 baseclasses=bases,
822 baseclasses=bases,
823 )
823 )
824
824
825 if not isinstance(typ, type):
825 if not isinstance(typ, type):
826 raise error.ProgrammingError(
826 raise error.ProgrammingError(
827 b'unable to construct type for %s' % iface
827 b'unable to construct type for %s' % iface
828 )
828 )
829
829
830 bases.append(typ)
830 bases.append(typ)
831
831
832 # type() allows you to use characters in type names that wouldn't be
832 # type() allows you to use characters in type names that wouldn't be
833 # recognized as Python symbols in source code. We abuse that to add
833 # recognized as Python symbols in source code. We abuse that to add
834 # rich information about our constructed repo.
834 # rich information about our constructed repo.
835 name = pycompat.sysstr(
835 name = pycompat.sysstr(
836 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
836 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
837 )
837 )
838
838
839 cls = type(name, tuple(bases), {})
839 cls = type(name, tuple(bases), {})
840
840
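# A minimal illustrative sketch (not part of this module) of the type()
# trick relied on above: the class name may contain characters that are not
# legal Python identifiers. The base classes below are hypothetical examples.
#
#   class _Main:
#       pass
#
#   class _FileStorage:
#       pass
#
#   derived = type('derivedrepo:/tmp/repo<store,fncache>',
#                  (_Main, _FileStorage), {})
#   assert isinstance(derived(), (_Main, _FileStorage))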
841 return cls(
841 return cls(
842 baseui=baseui,
842 baseui=baseui,
843 ui=ui,
843 ui=ui,
844 origroot=path,
844 origroot=path,
845 wdirvfs=wdirvfs,
845 wdirvfs=wdirvfs,
846 hgvfs=hgvfs,
846 hgvfs=hgvfs,
847 requirements=requirements,
847 requirements=requirements,
848 supportedrequirements=supportedrequirements,
848 supportedrequirements=supportedrequirements,
849 sharedpath=storebasepath,
849 sharedpath=storebasepath,
850 store=store,
850 store=store,
851 cachevfs=cachevfs,
851 cachevfs=cachevfs,
852 wcachevfs=wcachevfs,
852 wcachevfs=wcachevfs,
853 features=features,
853 features=features,
854 intents=intents,
854 intents=intents,
855 )
855 )
856
856
857
857
858 def loadhgrc(
858 def loadhgrc(
859 ui,
859 ui,
860 wdirvfs: vfsmod.vfs,
860 wdirvfs: vfsmod.vfs,
861 hgvfs: vfsmod.vfs,
861 hgvfs: vfsmod.vfs,
862 requirements,
862 requirements,
863 sharedvfs: Optional[vfsmod.vfs] = None,
863 sharedvfs: Optional[vfsmod.vfs] = None,
864 ):
864 ):
865 """Load hgrc files/content into a ui instance.
865 """Load hgrc files/content into a ui instance.
866
866
867 This is called during repository opening to load any additional
867 This is called during repository opening to load any additional
868 config files or settings relevant to the current repository.
868 config files or settings relevant to the current repository.
869
869
870 Returns a bool indicating whether any additional configs were loaded.
870 Returns a bool indicating whether any additional configs were loaded.
871
871
872 Extensions should monkeypatch this function to modify how per-repo
872 Extensions should monkeypatch this function to modify how per-repo
873 configs are loaded. For example, an extension may wish to pull in
873 configs are loaded. For example, an extension may wish to pull in
874 configs from alternate files or sources.
874 configs from alternate files or sources.
875
875
876 sharedvfs is a vfs object pointing to the source repo if the current
876 sharedvfs is a vfs object pointing to the source repo if the current
877 one is a shared one
877 one is a shared one
878 """
878 """
879 if not rcutil.use_repo_hgrc():
879 if not rcutil.use_repo_hgrc():
880 return False
880 return False
881
881
882 ret = False
882 ret = False
883 # first load config from shared source if we have to
883 # first load config from shared source if we have to
884 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
884 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
885 try:
885 try:
886 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
886 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
887 ret = True
887 ret = True
888 except IOError:
888 except IOError:
889 pass
889 pass
890
890
891 try:
891 try:
892 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
892 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
893 ret = True
893 ret = True
894 except IOError:
894 except IOError:
895 pass
895 pass
896
896
897 try:
897 try:
898 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
898 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
899 ret = True
899 ret = True
900 except IOError:
900 except IOError:
901 pass
901 pass
902
902
903 return ret
903 return ret
904
904
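# Hedged sketch of the monkeypatching mentioned in the docstring above: an
# extension could wrap `loadhgrc` to read an additional, hypothetical
# `ext.rc` file. The names `wrapped_loadhgrc` and `extsetup` are
# illustrative only.
#
#   from mercurial import extensions, localrepo
#
#   def wrapped_loadhgrc(orig, ui, wdirvfs, hgvfs, requirements, **kwargs):
#       loaded = orig(ui, wdirvfs, hgvfs, requirements, **kwargs)
#       try:
#           ui.readconfig(hgvfs.join(b'ext.rc'), root=wdirvfs.base)
#           loaded = True
#       except IOError:
#           pass
#       return loaded
#
#   def extsetup(ui):
#       extensions.wrapfunction(localrepo, 'loadhgrc', wrapped_loadhgrc)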
905
905
906 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
906 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
907 """Perform additional actions after .hg/hgrc is loaded.
907 """Perform additional actions after .hg/hgrc is loaded.
908
908
909 This function is called during repository loading immediately after
909 This function is called during repository loading immediately after
910 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
910 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
911
911
912 The function can be used to validate configs, automatically add
912 The function can be used to validate configs, automatically add
913 options (including extensions) based on requirements, etc.
913 options (including extensions) based on requirements, etc.
914 """
914 """
915
915
916 # Map of requirements to list of extensions to load automatically when
916 # Map of requirements to list of extensions to load automatically when
917 # requirement is present.
917 # requirement is present.
918 autoextensions = {
918 autoextensions = {
919 b'git': [b'git'],
919 b'git': [b'git'],
920 b'largefiles': [b'largefiles'],
920 b'largefiles': [b'largefiles'],
921 b'lfs': [b'lfs'],
921 b'lfs': [b'lfs'],
922 }
922 }
923
923
924 for requirement, names in sorted(autoextensions.items()):
924 for requirement, names in sorted(autoextensions.items()):
925 if requirement not in requirements:
925 if requirement not in requirements:
926 continue
926 continue
927
927
928 for name in names:
928 for name in names:
929 if not ui.hasconfig(b'extensions', name):
929 if not ui.hasconfig(b'extensions', name):
930 ui.setconfig(b'extensions', name, b'', source=b'autoload')
930 ui.setconfig(b'extensions', name, b'', source=b'autoload')
931
931
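# Illustrative sketch of the effect of the autoextensions mapping above,
# assuming a ui and vfs objects are already at hand (the call below is a
# hypothetical example, not part of this module):
#
#   afterhgrcload(ui, wdirvfs, hgvfs, {b'lfs', b'store'})
#   assert ui.config(b'extensions', b'lfs') == b''   # auto-enabled
#   # a pre-existing [extensions] lfs setting would have been left untouched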
932
932
933 def gathersupportedrequirements(ui):
933 def gathersupportedrequirements(ui):
934 """Determine the complete set of recognized requirements."""
934 """Determine the complete set of recognized requirements."""
935 # Start with all requirements supported by this file.
935 # Start with all requirements supported by this file.
936 supported = set(localrepository._basesupported)
936 supported = set(localrepository._basesupported)
937
937
938 # Execute ``featuresetupfuncs`` entries if they belong to an extension
938 # Execute ``featuresetupfuncs`` entries if they belong to an extension
939 # relevant to this ui instance.
939 # relevant to this ui instance.
940 modules = {m.__name__ for n, m in extensions.extensions(ui)}
940 modules = {m.__name__ for n, m in extensions.extensions(ui)}
941
941
942 for fn in featuresetupfuncs:
942 for fn in featuresetupfuncs:
943 if fn.__module__ in modules:
943 if fn.__module__ in modules:
944 fn(ui, supported)
944 fn(ui, supported)
945
945
946 # Add derived requirements from registered compression engines.
946 # Add derived requirements from registered compression engines.
947 for name in util.compengines:
947 for name in util.compengines:
948 engine = util.compengines[name]
948 engine = util.compengines[name]
949 if engine.available() and engine.revlogheader():
949 if engine.available() and engine.revlogheader():
950 supported.add(b'exp-compression-%s' % name)
950 supported.add(b'exp-compression-%s' % name)
951 if engine.name() == b'zstd':
951 if engine.name() == b'zstd':
952 supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)
952 supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)
953
953
954 return supported
954 return supported
955
955
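# Hedged sketch of how an extension can extend the supported set through
# ``featuresetupfuncs`` (the function and requirement names below are
# illustrative):
#
#   from mercurial import localrepo
#
#   def featuresetup(ui, supported):
#       supported.add(b'exp-my-extension-requirement')
#
#   def uisetup(ui):
#       localrepo.featuresetupfuncs.add(featuresetup)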
956
956
957 def ensurerequirementsrecognized(requirements, supported):
957 def ensurerequirementsrecognized(requirements, supported):
958 """Validate that a set of local requirements is recognized.
958 """Validate that a set of local requirements is recognized.
959
959
960 Receives a set of requirements. Raises an ``error.RepoError`` if there
960 Receives a set of requirements. Raises an ``error.RepoError`` if there
961 exists any requirement in that set that currently loaded code doesn't
961 exists any requirement in that set that currently loaded code doesn't
962 recognize.
962 recognize.
963
963
964 Returns a set of supported requirements.
964 Returns a set of supported requirements.
965 """
965 """
966 missing = set()
966 missing = set()
967
967
968 for requirement in requirements:
968 for requirement in requirements:
969 if requirement in supported:
969 if requirement in supported:
970 continue
970 continue
971
971
972 if not requirement or not requirement[0:1].isalnum():
972 if not requirement or not requirement[0:1].isalnum():
973 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
973 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
974
974
975 missing.add(requirement)
975 missing.add(requirement)
976
976
977 if missing:
977 if missing:
978 raise error.RequirementError(
978 raise error.RequirementError(
979 _(b'repository requires features unknown to this Mercurial: %s')
979 _(b'repository requires features unknown to this Mercurial: %s')
980 % b' '.join(sorted(missing)),
980 % b' '.join(sorted(missing)),
981 hint=_(
981 hint=_(
982 b'see https://mercurial-scm.org/wiki/MissingRequirement '
982 b'see https://mercurial-scm.org/wiki/MissingRequirement '
983 b'for more information'
983 b'for more information'
984 ),
984 ),
985 )
985 )
986
986
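# Illustrative usage sketch (requirement names below are examples only):
#
#   supported = {b'store', b'fncache', b'dotencode'}
#   ensurerequirementsrecognized({b'store', b'fncache'}, supported)  # passes
#   ensurerequirementsrecognized({b'exp-unknown'}, supported)
#   # -> raises error.RequirementError naming `exp-unknown`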
987
987
988 def ensurerequirementscompatible(ui, requirements):
988 def ensurerequirementscompatible(ui, requirements):
989 """Validates that a set of recognized requirements is mutually compatible.
989 """Validates that a set of recognized requirements is mutually compatible.
990
990
991 Some requirements may not be compatible with others or require
991 Some requirements may not be compatible with others or require
992 config options that aren't enabled. This function is called during
992 config options that aren't enabled. This function is called during
993 repository opening to ensure that the set of requirements needed
993 repository opening to ensure that the set of requirements needed
994 to open a repository is sane and compatible with config options.
994 to open a repository is sane and compatible with config options.
995
995
996 Extensions can monkeypatch this function to perform additional
996 Extensions can monkeypatch this function to perform additional
997 checking.
997 checking.
998
998
999 ``error.RepoError`` should be raised on failure.
999 ``error.RepoError`` should be raised on failure.
1000 """
1000 """
1001 if (
1001 if (
1002 requirementsmod.SPARSE_REQUIREMENT in requirements
1002 requirementsmod.SPARSE_REQUIREMENT in requirements
1003 and not sparse.enabled
1003 and not sparse.enabled
1004 ):
1004 ):
1005 raise error.RepoError(
1005 raise error.RepoError(
1006 _(
1006 _(
1007 b'repository is using sparse feature but '
1007 b'repository is using sparse feature but '
1008 b'sparse is not enabled; enable the '
1008 b'sparse is not enabled; enable the '
1009 b'"sparse" extension to access'
1009 b'"sparse" extension to access'
1010 )
1010 )
1011 )
1011 )
1012
1012
1013
1013
1014 def makestore(requirements, path, vfstype):
1014 def makestore(requirements, path, vfstype):
1015 """Construct a storage object for a repository."""
1015 """Construct a storage object for a repository."""
1016 if requirementsmod.STORE_REQUIREMENT in requirements:
1016 if requirementsmod.STORE_REQUIREMENT in requirements:
1017 if requirementsmod.FNCACHE_REQUIREMENT in requirements:
1017 if requirementsmod.FNCACHE_REQUIREMENT in requirements:
1018 dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
1018 dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
1019 return storemod.fncachestore(path, vfstype, dotencode)
1019 return storemod.fncachestore(path, vfstype, dotencode)
1020
1020
1021 return storemod.encodedstore(path, vfstype)
1021 return storemod.encodedstore(path, vfstype)
1022
1022
1023 return storemod.basicstore(path, vfstype)
1023 return storemod.basicstore(path, vfstype)
1024
1024
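# Hedged sketch of how the requirement set selects the store implementation
# (the vfs factory below is illustrative):
#
#   vfstype = lambda base: vfsmod.vfs(base, cacheaudited=True)
#   makestore({b'store', b'fncache', b'dotencode'}, path, vfstype)
#   # -> storemod.fncachestore with dotencode enabled
#   makestore({b'store'}, path, vfstype)
#   # -> storemod.encodedstore
#   makestore(set(), path, vfstype)
#   # -> storemod.basicstore (very old repositories)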
1025
1025
1026 def resolvestorevfsoptions(ui, requirements, features):
1026 def resolvestorevfsoptions(ui, requirements, features):
1027 """Resolve the options to pass to the store vfs opener.
1027 """Resolve the options to pass to the store vfs opener.
1028
1028
1029 The returned dict is used to influence behavior of the storage layer.
1029 The returned dict is used to influence behavior of the storage layer.
1030 """
1030 """
1031 options = {}
1031 options = {}
1032
1032
1033 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
1033 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
1034 options[b'treemanifest'] = True
1034 options[b'treemanifest'] = True
1035
1035
1036 # experimental config: format.manifestcachesize
1036 # experimental config: format.manifestcachesize
1037 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
1037 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
1038 if manifestcachesize is not None:
1038 if manifestcachesize is not None:
1039 options[b'manifestcachesize'] = manifestcachesize
1039 options[b'manifestcachesize'] = manifestcachesize
1040
1040
1041 # In the absence of another requirement superseding a revlog-related
1041 # In the absence of another requirement superseding a revlog-related
1042 # requirement, we have to assume the repo is using revlog version 0.
1042 # requirement, we have to assume the repo is using revlog version 0.
1043 # This revlog format is super old and we don't bother trying to parse
1043 # This revlog format is super old and we don't bother trying to parse
1044 # opener options for it because those options wouldn't do anything
1044 # opener options for it because those options wouldn't do anything
1045 # meaningful on such old repos.
1045 # meaningful on such old repos.
1046 if (
1046 if (
1047 requirementsmod.REVLOGV1_REQUIREMENT in requirements
1047 requirementsmod.REVLOGV1_REQUIREMENT in requirements
1048 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
1048 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
1049 ):
1049 ):
1050 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
1050 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
1051 else: # explicitly mark repo as using revlogv0
1051 else: # explicitly mark repo as using revlogv0
1052 options[b'revlogv0'] = True
1052 options[b'revlogv0'] = True
1053
1053
1054 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
1054 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
1055 options[b'copies-storage'] = b'changeset-sidedata'
1055 options[b'copies-storage'] = b'changeset-sidedata'
1056 else:
1056 else:
1057 writecopiesto = ui.config(b'experimental', b'copies.write-to')
1057 writecopiesto = ui.config(b'experimental', b'copies.write-to')
1058 copiesextramode = (b'changeset-only', b'compatibility')
1058 copiesextramode = (b'changeset-only', b'compatibility')
1059 if writecopiesto in copiesextramode:
1059 if writecopiesto in copiesextramode:
1060 options[b'copies-storage'] = b'extra'
1060 options[b'copies-storage'] = b'extra'
1061
1061
1062 return options
1062 return options
1063
1063
1064
1064
1065 def resolverevlogstorevfsoptions(ui, requirements, features):
1065 def resolverevlogstorevfsoptions(ui, requirements, features):
1066 """Resolve opener options specific to revlogs."""
1066 """Resolve opener options specific to revlogs."""
1067
1067
1068 options = {}
1068 options = {}
1069 options[b'flagprocessors'] = {}
1069 options[b'flagprocessors'] = {}
1070
1070
1071 feature_config = options[b'feature-config'] = revlog.FeatureConfig()
1071 feature_config = options[b'feature-config'] = revlog.FeatureConfig()
1072 data_config = options[b'data-config'] = revlog.DataConfig()
1072 data_config = options[b'data-config'] = revlog.DataConfig()
1073 delta_config = options[b'delta-config'] = revlog.DeltaConfig()
1073 delta_config = options[b'delta-config'] = revlog.DeltaConfig()
1074
1074
1075 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1075 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1076 options[b'revlogv1'] = True
1076 options[b'revlogv1'] = True
1077 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1077 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1078 options[b'revlogv2'] = True
1078 options[b'revlogv2'] = True
1079 if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
1079 if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
1080 options[b'changelogv2'] = True
1080 options[b'changelogv2'] = True
1081 cmp_rank = ui.configbool(b'experimental', b'changelog-v2.compute-rank')
1081 cmp_rank = ui.configbool(b'experimental', b'changelog-v2.compute-rank')
1082 options[b'changelogv2.compute-rank'] = cmp_rank
1082 options[b'changelogv2.compute-rank'] = cmp_rank
1083
1083
1084 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1084 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1085 options[b'generaldelta'] = True
1085 options[b'generaldelta'] = True
1086
1086
1087 # experimental config: format.chunkcachesize
1087 # experimental config: format.chunkcachesize
1088 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1088 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1089 if chunkcachesize is not None:
1089 if chunkcachesize is not None:
1090 data_config.chunk_cache_size = chunkcachesize
1090 data_config.chunk_cache_size = chunkcachesize
1091
1091
1092 memory_profile = scmutil.get_resource_profile(ui, b'memory')
1092 memory_profile = scmutil.get_resource_profile(ui, b'memory')
1093 if memory_profile >= scmutil.RESOURCE_MEDIUM:
1093 if memory_profile >= scmutil.RESOURCE_MEDIUM:
1094 data_config.uncompressed_cache_count = 10_000
1094 data_config.uncompressed_cache_count = 10_000
1095 data_config.uncompressed_cache_factor = 4
1095 data_config.uncompressed_cache_factor = 4
1096 if memory_profile >= scmutil.RESOURCE_HIGH:
1096 if memory_profile >= scmutil.RESOURCE_HIGH:
1097 data_config.uncompressed_cache_factor = 10
1097 data_config.uncompressed_cache_factor = 10
1098
1098
1099 delta_config.delta_both_parents = ui.configbool(
1099 delta_config.delta_both_parents = ui.configbool(
1100 b'storage', b'revlog.optimize-delta-parent-choice'
1100 b'storage', b'revlog.optimize-delta-parent-choice'
1101 )
1101 )
1102 delta_config.candidate_group_chunk_size = ui.configint(
1102 delta_config.candidate_group_chunk_size = ui.configint(
1103 b'storage',
1103 b'storage',
1104 b'revlog.delta-parent-search.candidate-group-chunk-size',
1104 b'revlog.delta-parent-search.candidate-group-chunk-size',
1105 )
1105 )
1106 delta_config.debug_delta = ui.configbool(b'debug', b'revlog.debug-delta')
1106 delta_config.debug_delta = ui.configbool(b'debug', b'revlog.debug-delta')
1107
1107
1108 issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
1108 issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
1109 options[b'issue6528.fix-incoming'] = issue6528
1109 options[b'issue6528.fix-incoming'] = issue6528
1110
1110
1111 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1111 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1112 lazydeltabase = False
1112 lazydeltabase = False
1113 if lazydelta:
1113 if lazydelta:
1114 lazydeltabase = ui.configbool(
1114 lazydeltabase = ui.configbool(
1115 b'storage', b'revlog.reuse-external-delta-parent'
1115 b'storage', b'revlog.reuse-external-delta-parent'
1116 )
1116 )
1117 if lazydeltabase is None:
1117 if lazydeltabase is None:
1118 lazydeltabase = not scmutil.gddeltaconfig(ui)
1118 lazydeltabase = not scmutil.gddeltaconfig(ui)
1119 delta_config.lazy_delta = lazydelta
1119 delta_config.lazy_delta = lazydelta
1120 delta_config.lazy_delta_base = lazydeltabase
1120 delta_config.lazy_delta_base = lazydeltabase
1121
1121
1122 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1122 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1123 if 0 <= chainspan:
1123 if 0 <= chainspan:
1124 delta_config.max_deltachain_span = chainspan
1124 delta_config.max_deltachain_span = chainspan
1125
1125
1126 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
1126 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
1127 if mmapindexthreshold is not None:
1127 if mmapindexthreshold is not None:
1128 data_config.mmap_index_threshold = mmapindexthreshold
1128 data_config.mmap_index_threshold = mmapindexthreshold
1129
1129
1130 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1130 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1131 srdensitythres = float(
1131 srdensitythres = float(
1132 ui.config(b'experimental', b'sparse-read.density-threshold')
1132 ui.config(b'experimental', b'sparse-read.density-threshold')
1133 )
1133 )
1134 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1134 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1135 data_config.with_sparse_read = withsparseread
1135 data_config.with_sparse_read = withsparseread
1136 data_config.sr_density_threshold = srdensitythres
1136 data_config.sr_density_threshold = srdensitythres
1137 data_config.sr_min_gap_size = srmingapsize
1137 data_config.sr_min_gap_size = srmingapsize
1138
1138
1139 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1139 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1140 delta_config.sparse_revlog = sparserevlog
1140 delta_config.sparse_revlog = sparserevlog
1141 if sparserevlog:
1141 if sparserevlog:
1142 options[b'generaldelta'] = True
1142 options[b'generaldelta'] = True
1143 data_config.with_sparse_read = True
1143 data_config.with_sparse_read = True
1144
1144
1145 maxchainlen = None
1145 maxchainlen = None
1146 if sparserevlog:
1146 if sparserevlog:
1147 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1147 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1148 # experimental config: format.maxchainlen
1148 # experimental config: format.maxchainlen
1149 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1149 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1150 if maxchainlen is not None:
1150 if maxchainlen is not None:
1151 delta_config.max_chain_len = maxchainlen
1151 delta_config.max_chain_len = maxchainlen
1152
1152
1153 for r in requirements:
1153 for r in requirements:
1154 # we allow multiple compression engine requirements to co-exist because,
1154 # we allow multiple compression engine requirements to co-exist because,
1155 # strictly speaking, revlog seems to support mixed compression styles.
1155 # strictly speaking, revlog seems to support mixed compression styles.
1156 #
1156 #
1157 # The compression used for new entries will be "the last one"
1157 # The compression used for new entries will be "the last one"
1158 prefix = r.startswith
1158 prefix = r.startswith
1159 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1159 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1160 feature_config.compression_engine = r.split(b'-', 2)[2]
1160 feature_config.compression_engine = r.split(b'-', 2)[2]
1161
1161
1162 zlib_level = ui.configint(b'storage', b'revlog.zlib.level')
1162 zlib_level = ui.configint(b'storage', b'revlog.zlib.level')
1163 if zlib_level is not None:
1163 if zlib_level is not None:
1164 if not (0 <= zlib_level <= 9):
1164 if not (0 <= zlib_level <= 9):
1165 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1165 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1166 raise error.Abort(msg % zlib_level)
1166 raise error.Abort(msg % zlib_level)
1167 feature_config.compression_engine_options[b'zlib.level'] = zlib_level
1167 feature_config.compression_engine_options[b'zlib.level'] = zlib_level
1168 zstd_level = ui.configint(b'storage', b'revlog.zstd.level')
1168 zstd_level = ui.configint(b'storage', b'revlog.zstd.level')
1169 if zstd_level is not None:
1169 if zstd_level is not None:
1170 if not (0 <= zstd_level <= 22):
1170 if not (0 <= zstd_level <= 22):
1171 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1171 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1172 raise error.Abort(msg % zstd_level)
1172 raise error.Abort(msg % zstd_level)
1173 feature_config.compression_engine_options[b'zstd.level'] = zstd_level
1173 feature_config.compression_engine_options[b'zstd.level'] = zstd_level
1174
1174
1175 if requirementsmod.NARROW_REQUIREMENT in requirements:
1175 if requirementsmod.NARROW_REQUIREMENT in requirements:
1176 feature_config.enable_ellipsis = True
1176 feature_config.enable_ellipsis = True
1177
1177
1178 if ui.configbool(b'experimental', b'rust.index'):
1178 if ui.configbool(b'experimental', b'rust.index'):
1179 options[b'rust.index'] = True
1179 options[b'rust.index'] = True
1180 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1180 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1181 slow_path = ui.config(
1181 slow_path = ui.config(
1182 b'storage', b'revlog.persistent-nodemap.slow-path'
1182 b'storage', b'revlog.persistent-nodemap.slow-path'
1183 )
1183 )
1184 if slow_path not in (b'allow', b'warn', b'abort'):
1184 if slow_path not in (b'allow', b'warn', b'abort'):
1185 default = ui.config_default(
1185 default = ui.config_default(
1186 b'storage', b'revlog.persistent-nodemap.slow-path'
1186 b'storage', b'revlog.persistent-nodemap.slow-path'
1187 )
1187 )
1188 msg = _(
1188 msg = _(
1189 b'unknown value for config '
1189 b'unknown value for config '
1190 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1190 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1191 )
1191 )
1192 ui.warn(msg % slow_path)
1192 ui.warn(msg % slow_path)
1193 if not ui.quiet:
1193 if not ui.quiet:
1194 ui.warn(_(b'falling back to default value: %s\n') % default)
1194 ui.warn(_(b'falling back to default value: %s\n') % default)
1195 slow_path = default
1195 slow_path = default
1196
1196
1197 msg = _(
1197 msg = _(
1198 b"accessing `persistent-nodemap` repository without associated "
1198 b"accessing `persistent-nodemap` repository without associated "
1199 b"fast implementation."
1199 b"fast implementation."
1200 )
1200 )
1201 hint = _(
1201 hint = _(
1202 b"check `hg help config.format.use-persistent-nodemap` "
1202 b"check `hg help config.format.use-persistent-nodemap` "
1203 b"for details"
1203 b"for details"
1204 )
1204 )
1205 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1205 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1206 if slow_path == b'warn':
1206 if slow_path == b'warn':
1207 msg = b"warning: " + msg + b'\n'
1207 msg = b"warning: " + msg + b'\n'
1208 ui.warn(msg)
1208 ui.warn(msg)
1209 if not ui.quiet:
1209 if not ui.quiet:
1210 hint = b'(' + hint + b')\n'
1210 hint = b'(' + hint + b')\n'
1211 ui.warn(hint)
1211 ui.warn(hint)
1212 if slow_path == b'abort':
1212 if slow_path == b'abort':
1213 raise error.Abort(msg, hint=hint)
1213 raise error.Abort(msg, hint=hint)
1214 options[b'persistent-nodemap'] = True
1214 options[b'persistent-nodemap'] = True
1215 if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
1215 if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
1216 slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
1216 slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
1217 if slow_path not in (b'allow', b'warn', b'abort'):
1217 if slow_path not in (b'allow', b'warn', b'abort'):
1218 default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
1218 default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
1219 msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
1219 msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
1220 ui.warn(msg % slow_path)
1220 ui.warn(msg % slow_path)
1221 if not ui.quiet:
1221 if not ui.quiet:
1222 ui.warn(_(b'falling back to default value: %s\n') % default)
1222 ui.warn(_(b'falling back to default value: %s\n') % default)
1223 slow_path = default
1223 slow_path = default
1224
1224
1225 msg = _(
1225 msg = _(
1226 b"accessing `dirstate-v2` repository without associated "
1226 b"accessing `dirstate-v2` repository without associated "
1227 b"fast implementation."
1227 b"fast implementation."
1228 )
1228 )
1229 hint = _(
1229 hint = _(
1230 b"check `hg help config.format.use-dirstate-v2` " b"for details"
1230 b"check `hg help config.format.use-dirstate-v2` " b"for details"
1231 )
1231 )
1232 if not dirstate.HAS_FAST_DIRSTATE_V2:
1232 if not dirstate.HAS_FAST_DIRSTATE_V2:
1233 if slow_path == b'warn':
1233 if slow_path == b'warn':
1234 msg = b"warning: " + msg + b'\n'
1234 msg = b"warning: " + msg + b'\n'
1235 ui.warn(msg)
1235 ui.warn(msg)
1236 if not ui.quiet:
1236 if not ui.quiet:
1237 hint = b'(' + hint + b')\n'
1237 hint = b'(' + hint + b')\n'
1238 ui.warn(hint)
1238 ui.warn(hint)
1239 if slow_path == b'abort':
1239 if slow_path == b'abort':
1240 raise error.Abort(msg, hint=hint)
1240 raise error.Abort(msg, hint=hint)
1241 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1241 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1242 options[b'persistent-nodemap.mmap'] = True
1242 options[b'persistent-nodemap.mmap'] = True
1243 if ui.configbool(b'devel', b'persistent-nodemap'):
1243 if ui.configbool(b'devel', b'persistent-nodemap'):
1244 options[b'devel-force-nodemap'] = True
1244 options[b'devel-force-nodemap'] = True
1245
1245
1246 return options
1246 return options
1247
1247
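# Hedged sketch of what the resolved options roughly look like for a modern
# repository (the requirement names are real, the values shown are
# illustrative and not exhaustive):
#
#   opts = resolverevlogstorevfsoptions(
#       ui, {b'revlogv1', b'generaldelta', b'sparserevlog'}, set())
#   opts[b'revlogv1']                       # True
#   opts[b'generaldelta']                   # True
#   opts[b'delta-config'].sparse_revlog     # True
#   opts[b'data-config'].with_sparse_read   # True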
1248
1248
1249 def makemain(**kwargs):
1249 def makemain(**kwargs):
1250 """Produce a type conforming to ``ilocalrepositorymain``."""
1250 """Produce a type conforming to ``ilocalrepositorymain``."""
1251 return localrepository
1251 return localrepository
1252
1252
1253
1253
1254 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1254 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1255 class revlogfilestorage:
1255 class revlogfilestorage:
1256 """File storage when using revlogs."""
1256 """File storage when using revlogs."""
1257
1257
1258 def file(self, path):
1258 def file(self, path):
1259 if path.startswith(b'/'):
1259 if path.startswith(b'/'):
1260 path = path[1:]
1260 path = path[1:]
1261
1261
1262 try_split = (
1262 try_split = (
1263 self.currenttransaction() is not None
1263 self.currenttransaction() is not None
1264 or txnutil.mayhavepending(self.root)
1264 or txnutil.mayhavepending(self.root)
1265 )
1265 )
1266
1266
1267 return filelog.filelog(self.svfs, path, try_split=try_split)
1267 return filelog.filelog(self.svfs, path, try_split=try_split)
1268
1268
1269
1269
1270 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1270 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1271 class revlognarrowfilestorage:
1271 class revlognarrowfilestorage:
1272 """File storage when using revlogs and narrow files."""
1272 """File storage when using revlogs and narrow files."""
1273
1273
1274 def file(self, path):
1274 def file(self, path):
1275 if path.startswith(b'/'):
1275 if path.startswith(b'/'):
1276 path = path[1:]
1276 path = path[1:]
1277
1277
1278 try_split = (
1278 try_split = (
1279 self.currenttransaction() is not None
1279 self.currenttransaction() is not None
1280 or txnutil.mayhavepending(self.root)
1280 or txnutil.mayhavepending(self.root)
1281 )
1281 )
1282 return filelog.narrowfilelog(
1282 return filelog.narrowfilelog(
1283 self.svfs, path, self._storenarrowmatch, try_split=try_split
1283 self.svfs, path, self._storenarrowmatch, try_split=try_split
1284 )
1284 )
1285
1285
1286
1286
1287 def makefilestorage(requirements, features, **kwargs):
1287 def makefilestorage(requirements, features, **kwargs):
1288 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1288 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1289 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1289 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1290 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1290 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1291
1291
1292 if requirementsmod.NARROW_REQUIREMENT in requirements:
1292 if requirementsmod.NARROW_REQUIREMENT in requirements:
1293 return revlognarrowfilestorage
1293 return revlognarrowfilestorage
1294 else:
1294 else:
1295 return revlogfilestorage
1295 return revlogfilestorage
1296
1296
1297
1297
1298 # List of repository interfaces and factory functions for them. Each
1298 # List of repository interfaces and factory functions for them. Each
1299 # will be called in order during ``makelocalrepository()`` to iteratively
1299 # will be called in order during ``makelocalrepository()`` to iteratively
1300 # derive the final type for a local repository instance. We capture the
1300 # derive the final type for a local repository instance. We capture the
1301 # function as a lambda so we don't hold a reference and the module-level
1301 # function as a lambda so we don't hold a reference and the module-level
1302 # functions can be wrapped.
1302 # functions can be wrapped.
1303 REPO_INTERFACES = [
1303 REPO_INTERFACES = [
1304 (repository.ilocalrepositorymain, lambda: makemain),
1304 (repository.ilocalrepositorymain, lambda: makemain),
1305 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1305 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1306 ]
1306 ]
1307
1307
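# Hedged sketch of how an extension might hook into this: since the list
# holds factory-returning lambdas, wrapping the module-level functions is
# sufficient. The names `wrapped_makemain` and `myrepo` are illustrative.
#
#   from mercurial import extensions, localrepo
#
#   def wrapped_makemain(orig, **kwargs):
#       cls = orig(**kwargs)
#       class myrepo(cls):
#           pass
#       return myrepo
#
#   def uisetup(ui):
#       extensions.wrapfunction(localrepo, 'makemain', wrapped_makemain)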
1308
1308
1309 @interfaceutil.implementer(repository.ilocalrepositorymain)
1309 @interfaceutil.implementer(repository.ilocalrepositorymain)
1310 class localrepository:
1310 class localrepository:
1311 """Main class for representing local repositories.
1311 """Main class for representing local repositories.
1312
1312
1313 All local repositories are instances of this class.
1313 All local repositories are instances of this class.
1314
1314
1315 Constructed on its own, instances of this class are not usable as
1315 Constructed on its own, instances of this class are not usable as
1316 repository objects. To obtain a usable repository object, call
1316 repository objects. To obtain a usable repository object, call
1317 ``hg.repository()``, ``localrepo.instance()``, or
1317 ``hg.repository()``, ``localrepo.instance()``, or
1318 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1318 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1319 ``instance()`` adds support for creating new repositories.
1319 ``instance()`` adds support for creating new repositories.
1320 ``hg.repository()`` adds more extension integration, including calling
1320 ``hg.repository()`` adds more extension integration, including calling
1321 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1321 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1322 used.
1322 used.
1323 """
1323 """
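    # Hedged usage sketch of the guidance in the docstring above (the path
    # is an illustrative example):
    #
    #   from mercurial import hg, ui as uimod
    #
    #   ui = uimod.ui.load()
    #   repo = hg.repository(ui, path=b'/path/to/repo')
    #   tip = repo[b'tip']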
1324
1324
1325 _basesupported = {
1325 _basesupported = {
1326 requirementsmod.ARCHIVED_PHASE_REQUIREMENT,
1326 requirementsmod.ARCHIVED_PHASE_REQUIREMENT,
1327 requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
1327 requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
1328 requirementsmod.CHANGELOGV2_REQUIREMENT,
1328 requirementsmod.CHANGELOGV2_REQUIREMENT,
1329 requirementsmod.COPIESSDC_REQUIREMENT,
1329 requirementsmod.COPIESSDC_REQUIREMENT,
1330 requirementsmod.DIRSTATE_TRACKED_HINT_V1,
1330 requirementsmod.DIRSTATE_TRACKED_HINT_V1,
1331 requirementsmod.DIRSTATE_V2_REQUIREMENT,
1331 requirementsmod.DIRSTATE_V2_REQUIREMENT,
1332 requirementsmod.DOTENCODE_REQUIREMENT,
1332 requirementsmod.DOTENCODE_REQUIREMENT,
1333 requirementsmod.FNCACHE_REQUIREMENT,
1333 requirementsmod.FNCACHE_REQUIREMENT,
1334 requirementsmod.GENERALDELTA_REQUIREMENT,
1334 requirementsmod.GENERALDELTA_REQUIREMENT,
1335 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1335 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1336 requirementsmod.NODEMAP_REQUIREMENT,
1336 requirementsmod.NODEMAP_REQUIREMENT,
1337 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1337 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1338 requirementsmod.REVLOGV1_REQUIREMENT,
1338 requirementsmod.REVLOGV1_REQUIREMENT,
1339 requirementsmod.REVLOGV2_REQUIREMENT,
1339 requirementsmod.REVLOGV2_REQUIREMENT,
1340 requirementsmod.SHARED_REQUIREMENT,
1340 requirementsmod.SHARED_REQUIREMENT,
1341 requirementsmod.SHARESAFE_REQUIREMENT,
1341 requirementsmod.SHARESAFE_REQUIREMENT,
1342 requirementsmod.SPARSE_REQUIREMENT,
1342 requirementsmod.SPARSE_REQUIREMENT,
1343 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1343 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1344 requirementsmod.STORE_REQUIREMENT,
1344 requirementsmod.STORE_REQUIREMENT,
1345 requirementsmod.TREEMANIFEST_REQUIREMENT,
1345 requirementsmod.TREEMANIFEST_REQUIREMENT,
1346 }
1346 }
1347
1347
1348 # list of prefix for file which can be written without 'wlock'
1348 # list of prefix for file which can be written without 'wlock'
1349 # Extensions should extend this list when needed
1349 # Extensions should extend this list when needed
1350 _wlockfreeprefix = {
1350 _wlockfreeprefix = {
1351 # We might consider requiring 'wlock' for the next
1351 # We might consider requiring 'wlock' for the next
1352 # two, but pretty much all the existing code assumes
1352 # two, but pretty much all the existing code assumes
1353 # wlock is not needed so we keep them excluded for
1353 # wlock is not needed so we keep them excluded for
1354 # now.
1354 # now.
1355 b'hgrc',
1355 b'hgrc',
1356 b'requires',
1356 b'requires',
1357 # XXX cache is a complicated business; someone
1357 # XXX cache is a complicated business; someone
1358 # should investigate this in depth at some point
1358 # should investigate this in depth at some point
1359 b'cache/',
1359 b'cache/',
1360 # XXX bisect was still a bit too messy at the time
1360 # XXX bisect was still a bit too messy at the time
1361 # this changeset was introduced. Someone should fix
1361 # this changeset was introduced. Someone should fix
1362 # the remaining bit and drop this line
1362 # the remaining bit and drop this line
1363 b'bisect.state',
1363 b'bisect.state',
1364 }
1364 }
1365
1365
1366 def __init__(
1366 def __init__(
1367 self,
1367 self,
1368 baseui,
1368 baseui,
1369 ui,
1369 ui,
1370 origroot: bytes,
1370 origroot: bytes,
1371 wdirvfs: vfsmod.vfs,
1371 wdirvfs: vfsmod.vfs,
1372 hgvfs: vfsmod.vfs,
1372 hgvfs: vfsmod.vfs,
1373 requirements,
1373 requirements,
1374 supportedrequirements,
1374 supportedrequirements,
1375 sharedpath: bytes,
1375 sharedpath: bytes,
1376 store,
1376 store,
1377 cachevfs: vfsmod.vfs,
1377 cachevfs: vfsmod.vfs,
1378 wcachevfs: vfsmod.vfs,
1378 wcachevfs: vfsmod.vfs,
1379 features,
1379 features,
1380 intents=None,
1380 intents=None,
1381 ):
1381 ):
1382 """Create a new local repository instance.
1382 """Create a new local repository instance.
1383
1383
1384 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1384 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1385 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1385 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1386 object.
1386 object.
1387
1387
1388 Arguments:
1388 Arguments:
1389
1389
1390 baseui
1390 baseui
1391 ``ui.ui`` instance that the ``ui`` argument was based on.
1391 ``ui.ui`` instance that the ``ui`` argument was based on.
1392
1392
1393 ui
1393 ui
1394 ``ui.ui`` instance for use by the repository.
1394 ``ui.ui`` instance for use by the repository.
1395
1395
1396 origroot
1396 origroot
1397 ``bytes`` path to working directory root of this repository.
1397 ``bytes`` path to working directory root of this repository.
1398
1398
1399 wdirvfs
1399 wdirvfs
1400 ``vfs.vfs`` rooted at the working directory.
1400 ``vfs.vfs`` rooted at the working directory.
1401
1401
1402 hgvfs
1402 hgvfs
1403 ``vfs.vfs`` rooted at .hg/
1403 ``vfs.vfs`` rooted at .hg/
1404
1404
1405 requirements
1405 requirements
1406 ``set`` of bytestrings representing repository opening requirements.
1406 ``set`` of bytestrings representing repository opening requirements.
1407
1407
1408 supportedrequirements
1408 supportedrequirements
1409 ``set`` of bytestrings representing repository requirements that we
1409 ``set`` of bytestrings representing repository requirements that we
1410 know how to open. May be a superset of ``requirements``.
1410 know how to open. May be a superset of ``requirements``.
1411
1411
1412 sharedpath
1412 sharedpath
1413 ``bytes`` defining the path to the storage base directory. Points to a
1413 ``bytes`` defining the path to the storage base directory. Points to a
1414 ``.hg/`` directory somewhere.
1414 ``.hg/`` directory somewhere.
1415
1415
1416 store
1416 store
1417 ``store.basicstore`` (or derived) instance providing access to
1417 ``store.basicstore`` (or derived) instance providing access to
1418 versioned storage.
1418 versioned storage.
1419
1419
1420 cachevfs
1420 cachevfs
1421 ``vfs.vfs`` used for cache files.
1421 ``vfs.vfs`` used for cache files.
1422
1422
1423 wcachevfs
1423 wcachevfs
1424 ``vfs.vfs`` used for cache files related to the working copy.
1424 ``vfs.vfs`` used for cache files related to the working copy.
1425
1425
1426 features
1426 features
1427 ``set`` of bytestrings defining features/capabilities of this
1427 ``set`` of bytestrings defining features/capabilities of this
1428 instance.
1428 instance.
1429
1429
1430 intents
1430 intents
1431 ``set`` of system strings indicating what this repo will be used
1431 ``set`` of system strings indicating what this repo will be used
1432 for.
1432 for.
1433 """
1433 """
1434 self.baseui = baseui
1434 self.baseui = baseui
1435 self.ui = ui
1435 self.ui = ui
1436 self.origroot = origroot
1436 self.origroot = origroot
1437 # vfs rooted at working directory.
1437 # vfs rooted at working directory.
1438 self.wvfs = wdirvfs
1438 self.wvfs = wdirvfs
1439 self.root = wdirvfs.base
1439 self.root = wdirvfs.base
1440 # vfs rooted at .hg/. Used to access most non-store paths.
1440 # vfs rooted at .hg/. Used to access most non-store paths.
1441 self.vfs = hgvfs
1441 self.vfs = hgvfs
1442 self.path = hgvfs.base
1442 self.path = hgvfs.base
1443 self.requirements = requirements
1443 self.requirements = requirements
1444 self.nodeconstants = sha1nodeconstants
1444 self.nodeconstants = sha1nodeconstants
1445 self.nullid = self.nodeconstants.nullid
1445 self.nullid = self.nodeconstants.nullid
1446 self.supported = supportedrequirements
1446 self.supported = supportedrequirements
1447 self.sharedpath = sharedpath
1447 self.sharedpath = sharedpath
1448 self.store = store
1448 self.store = store
1449 self.cachevfs = cachevfs
1449 self.cachevfs = cachevfs
1450 self.wcachevfs = wcachevfs
1450 self.wcachevfs = wcachevfs
1451 self.features = features
1451 self.features = features
1452
1452
1453 self.filtername = None
1453 self.filtername = None
1454
1454
1455 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1455 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1456 b'devel', b'check-locks'
1456 b'devel', b'check-locks'
1457 ):
1457 ):
1458 self.vfs.audit = self._getvfsward(self.vfs.audit)
1458 self.vfs.audit = self._getvfsward(self.vfs.audit)
1459 # A list of callbacks to shape the phase if no data were found.
1459 # A list of callbacks to shape the phase if no data were found.
1460 # Callbacks are in the form: func(repo, roots) --> processed root.
1460 # Callbacks are in the form: func(repo, roots) --> processed root.
1461 # This list is to be filled by extensions during repo setup
1461 # This list is to be filled by extensions during repo setup
1462 self._phasedefaults = []
1462 self._phasedefaults = []
1463
1463
1464 color.setup(self.ui)
1464 color.setup(self.ui)
1465
1465
1466 self.spath = self.store.path
1466 self.spath = self.store.path
1467 self.svfs = self.store.vfs
1467 self.svfs = self.store.vfs
1468 self.sjoin = self.store.join
1468 self.sjoin = self.store.join
1469 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1469 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1470 b'devel', b'check-locks'
1470 b'devel', b'check-locks'
1471 ):
1471 ):
1472 if hasattr(self.svfs, 'vfs'): # this is filtervfs
1472 if hasattr(self.svfs, 'vfs'): # this is filtervfs
1473 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1473 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1474 else: # standard vfs
1474 else: # standard vfs
1475 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1475 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1476
1476
1477 self._dirstatevalidatewarned = False
1477 self._dirstatevalidatewarned = False
1478
1478
1479 self._branchcaches = branchmap.BranchMapCache()
1479 self._branchcaches = branchmap.BranchMapCache()
1480 self._revbranchcache = None
1480 self._revbranchcache = None
1481 self._filterpats = {}
1481 self._filterpats = {}
1482 self._datafilters = {}
1482 self._datafilters = {}
1483 self._transref = self._lockref = self._wlockref = None
1483 self._transref = self._lockref = self._wlockref = None
1484
1484
1485 # A cache for various files under .hg/ that tracks file changes,
1485 # A cache for various files under .hg/ that tracks file changes,
1486 # (used by the filecache decorator)
1486 # (used by the filecache decorator)
1487 #
1487 #
1488 # Maps a property name to its util.filecacheentry
1488 # Maps a property name to its util.filecacheentry
1489 self._filecache = {}
1489 self._filecache = {}
1490
1490
1491 # hold sets of revisions to be filtered
1491 # hold sets of revisions to be filtered
1492 # should be cleared when something might have changed the filter value:
1492 # should be cleared when something might have changed the filter value:
1493 # - new changesets,
1493 # - new changesets,
1494 # - phase change,
1494 # - phase change,
1495 # - new obsolescence marker,
1495 # - new obsolescence marker,
1496 # - working directory parent change,
1496 # - working directory parent change,
1497 # - bookmark changes
1497 # - bookmark changes
1498 self.filteredrevcache = {}
1498 self.filteredrevcache = {}
1499
1499
1500 self._dirstate = None
1500 self._dirstate = None
1501 # post-dirstate-status hooks
1501 # post-dirstate-status hooks
1502 self._postdsstatus = []
1502 self._postdsstatus = []
1503
1503
1504 self._pending_narrow_pats = None
1504 self._pending_narrow_pats = None
1505 self._pending_narrow_pats_dirstate = None
1505 self._pending_narrow_pats_dirstate = None
1506
1506
1507 # generic mapping between names and nodes
1507 # generic mapping between names and nodes
1508 self.names = namespaces.namespaces()
1508 self.names = namespaces.namespaces()
1509
1509
1510 # Key to signature value.
1510 # Key to signature value.
1511 self._sparsesignaturecache = {}
1511 self._sparsesignaturecache = {}
1512 # Signature to cached matcher instance.
1512 # Signature to cached matcher instance.
1513 self._sparsematchercache = {}
1513 self._sparsematchercache = {}
1514
1514
1515 self._extrafilterid = repoview.extrafilter(ui)
1515 self._extrafilterid = repoview.extrafilter(ui)
1516
1516
1517 self.filecopiesmode = None
1517 self.filecopiesmode = None
1518 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1518 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1519 self.filecopiesmode = b'changeset-sidedata'
1519 self.filecopiesmode = b'changeset-sidedata'
1520
1520
1521 self._wanted_sidedata = set()
1521 self._wanted_sidedata = set()
1522 self._sidedata_computers = {}
1522 self._sidedata_computers = {}
1523 sidedatamod.set_sidedata_spec_for_repo(self)
1523 sidedatamod.set_sidedata_spec_for_repo(self)
1524
1524
1525 def _getvfsward(self, origfunc):
1525 def _getvfsward(self, origfunc):
1526 """build a ward for self.vfs"""
1526 """build a ward for self.vfs"""
1527 rref = weakref.ref(self)
1527 rref = weakref.ref(self)
1528
1528
1529 def checkvfs(path, mode=None):
1529 def checkvfs(path, mode=None):
1530 ret = origfunc(path, mode=mode)
1530 ret = origfunc(path, mode=mode)
1531 repo = rref()
1531 repo = rref()
1532 if (
1532 if (
1533 repo is None
1533 repo is None
1534 or not hasattr(repo, '_wlockref')
1534 or not hasattr(repo, '_wlockref')
1535 or not hasattr(repo, '_lockref')
1535 or not hasattr(repo, '_lockref')
1536 ):
1536 ):
1537 return
1537 return
1538 if mode in (None, b'r', b'rb'):
1538 if mode in (None, b'r', b'rb'):
1539 return
1539 return
1540 if path.startswith(repo.path):
1540 if path.startswith(repo.path):
1541 # truncate name relative to the repository (.hg)
1541 # truncate name relative to the repository (.hg)
1542 path = path[len(repo.path) + 1 :]
1542 path = path[len(repo.path) + 1 :]
1543 if path.startswith(b'cache/'):
1543 if path.startswith(b'cache/'):
1544 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1544 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1545 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1545 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1546 # path prefixes covered by 'lock'
1546 # path prefixes covered by 'lock'
1547 vfs_path_prefixes = (
1547 vfs_path_prefixes = (
1548 b'journal.',
1548 b'journal.',
1549 b'undo.',
1549 b'undo.',
1550 b'strip-backup/',
1550 b'strip-backup/',
1551 b'cache/',
1551 b'cache/',
1552 )
1552 )
1553 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1553 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1554 if repo._currentlock(repo._lockref) is None:
1554 if repo._currentlock(repo._lockref) is None:
1555 repo.ui.develwarn(
1555 repo.ui.develwarn(
1556 b'write with no lock: "%s"' % path,
1556 b'write with no lock: "%s"' % path,
1557 stacklevel=3,
1557 stacklevel=3,
1558 config=b'check-locks',
1558 config=b'check-locks',
1559 )
1559 )
1560 elif repo._currentlock(repo._wlockref) is None:
1560 elif repo._currentlock(repo._wlockref) is None:
1561 # rest of vfs files are covered by 'wlock'
1561 # rest of vfs files are covered by 'wlock'
1562 #
1562 #
1563 # exclude special files
1563 # exclude special files
1564 for prefix in self._wlockfreeprefix:
1564 for prefix in self._wlockfreeprefix:
1565 if path.startswith(prefix):
1565 if path.startswith(prefix):
1566 return
1566 return
1567 repo.ui.develwarn(
1567 repo.ui.develwarn(
1568 b'write with no wlock: "%s"' % path,
1568 b'write with no wlock: "%s"' % path,
1569 stacklevel=3,
1569 stacklevel=3,
1570 config=b'check-locks',
1570 config=b'check-locks',
1571 )
1571 )
1572 return ret
1572 return ret
1573
1573
1574 return checkvfs
1574 return checkvfs
1575
1575
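    # Illustrative sketch of the effect of the ward installed above when
    # devel.check-locks is enabled (the file name is hypothetical):
    #
    #   with repo.vfs(b'journal.example', b'w'):   # no `lock` held
    #       pass
    #   # -> devel-warn: write with no lock: "journal.example"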
1576 def _getsvfsward(self, origfunc):
1576 def _getsvfsward(self, origfunc):
1577 """build a ward for self.svfs"""
1577 """build a ward for self.svfs"""
1578 rref = weakref.ref(self)
1578 rref = weakref.ref(self)
1579
1579
1580 def checksvfs(path, mode=None):
1580 def checksvfs(path, mode=None):
1581 ret = origfunc(path, mode=mode)
1581 ret = origfunc(path, mode=mode)
1582 repo = rref()
1582 repo = rref()
1583 if repo is None or not hasattr(repo, '_lockref'):
1583 if repo is None or not hasattr(repo, '_lockref'):
1584 return
1584 return
1585 if mode in (None, b'r', b'rb'):
1585 if mode in (None, b'r', b'rb'):
1586 return
1586 return
1587 if path.startswith(repo.sharedpath):
1587 if path.startswith(repo.sharedpath):
1588 # truncate name relative to the repository (.hg)
1588 # truncate name relative to the repository (.hg)
1589 path = path[len(repo.sharedpath) + 1 :]
1589 path = path[len(repo.sharedpath) + 1 :]
1590 if repo._currentlock(repo._lockref) is None:
1590 if repo._currentlock(repo._lockref) is None:
1591 repo.ui.develwarn(
1591 repo.ui.develwarn(
1592 b'write with no lock: "%s"' % path, stacklevel=4
1592 b'write with no lock: "%s"' % path, stacklevel=4
1593 )
1593 )
1594 return ret
1594 return ret
1595
1595
1596 return checksvfs
1596 return checksvfs
1597
1597
1598 @property
1598 @property
1599 def vfs_map(self):
1599 def vfs_map(self):
1600 return {
1600 return {
1601 b'': self.svfs,
1601 b'': self.svfs,
1602 b'plain': self.vfs,
1602 b'plain': self.vfs,
1603 b'store': self.svfs,
1603 b'store': self.svfs,
1604 }
1604 }
1605
1605
1606 def close(self):
1606 def close(self):
1607 self._writecaches()
1607 self._writecaches()
1608
1608
1609 def _writecaches(self):
1609 def _writecaches(self):
1610 if self._revbranchcache:
1610 if self._revbranchcache:
1611 self._revbranchcache.write()
1611 self._revbranchcache.write()
1612
1612
1613 def _restrictcapabilities(self, caps):
1613 def _restrictcapabilities(self, caps):
1614 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1614 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1615 caps = set(caps)
1615 caps = set(caps)
1616 capsblob = bundle2.encodecaps(
1616 capsblob = bundle2.encodecaps(
1617 bundle2.getrepocaps(self, role=b'client')
1617 bundle2.getrepocaps(self, role=b'client')
1618 )
1618 )
1619 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1619 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1620 if self.ui.configbool(b'experimental', b'narrow'):
1620 if self.ui.configbool(b'experimental', b'narrow'):
1621 caps.add(wireprototypes.NARROWCAP)
1621 caps.add(wireprototypes.NARROWCAP)
1622 return caps
1622 return caps
1623
1623
1624 # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
1624 # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
1625 # self -> auditor -> self._checknested -> self
1625 # self -> auditor -> self._checknested -> self
1626
1626
1627 @property
1627 @property
1628 def auditor(self):
1628 def auditor(self):
1629 # This is only used by context.workingctx.match in order to
1629 # This is only used by context.workingctx.match in order to
1630 # detect files in subrepos.
1630 # detect files in subrepos.
1631 return pathutil.pathauditor(self.root, callback=self._checknested)
1631 return pathutil.pathauditor(self.root, callback=self._checknested)
1632
1632
1633 @property
1633 @property
1634 def nofsauditor(self):
1634 def nofsauditor(self):
1635 # This is only used by context.basectx.match in order to detect
1635 # This is only used by context.basectx.match in order to detect
1636 # files in subrepos.
1636 # files in subrepos.
1637 return pathutil.pathauditor(
1637 return pathutil.pathauditor(
1638 self.root, callback=self._checknested, realfs=False, cached=True
1638 self.root, callback=self._checknested, realfs=False, cached=True
1639 )
1639 )
1640
1640
1641 def _checknested(self, path):
1641 def _checknested(self, path):
1642 """Determine if path is a legal nested repository."""
1642 """Determine if path is a legal nested repository."""
1643 if not path.startswith(self.root):
1643 if not path.startswith(self.root):
1644 return False
1644 return False
1645 subpath = path[len(self.root) + 1 :]
1645 subpath = path[len(self.root) + 1 :]
1646 normsubpath = util.pconvert(subpath)
1646 normsubpath = util.pconvert(subpath)
1647
1647
1648 # XXX: Checking against the current working copy is wrong in
1648 # XXX: Checking against the current working copy is wrong in
1649 # the sense that it can reject things like
1649 # the sense that it can reject things like
1650 #
1650 #
1651 # $ hg cat -r 10 sub/x.txt
1651 # $ hg cat -r 10 sub/x.txt
1652 #
1652 #
1653 # if sub/ is no longer a subrepository in the working copy
1653 # if sub/ is no longer a subrepository in the working copy
1654 # parent revision.
1654 # parent revision.
1655 #
1655 #
1656 # However, it can of course also allow things that would have
1656 # However, it can of course also allow things that would have
1657 # been rejected before, such as the above cat command if sub/
1657 # been rejected before, such as the above cat command if sub/
1658 # is a subrepository now, but was a normal directory before.
1658 # is a subrepository now, but was a normal directory before.
1659 # The old path auditor would have rejected by mistake since it
1659 # The old path auditor would have rejected by mistake since it
1660 # panics when it sees sub/.hg/.
1660 # panics when it sees sub/.hg/.
1661 #
1661 #
1662 # All in all, checking against the working copy seems sensible
1662 # All in all, checking against the working copy seems sensible
1663 # since we want to prevent access to nested repositories on
1663 # since we want to prevent access to nested repositories on
1664 # the filesystem *now*.
1664 # the filesystem *now*.
1665 ctx = self[None]
1665 ctx = self[None]
1666 parts = util.splitpath(subpath)
1666 parts = util.splitpath(subpath)
1667 while parts:
1667 while parts:
1668 prefix = b'/'.join(parts)
1668 prefix = b'/'.join(parts)
1669 if prefix in ctx.substate:
1669 if prefix in ctx.substate:
1670 if prefix == normsubpath:
1670 if prefix == normsubpath:
1671 return True
1671 return True
1672 else:
1672 else:
1673 sub = ctx.sub(prefix)
1673 sub = ctx.sub(prefix)
1674 return sub.checknested(subpath[len(prefix) + 1 :])
1674 return sub.checknested(subpath[len(prefix) + 1 :])
1675 else:
1675 else:
1676 parts.pop()
1676 parts.pop()
1677 return False
1677 return False
1678
1678
1679 def peer(self, path=None, remotehidden=False):
1679 def peer(self, path=None, remotehidden=False):
1680 return localpeer(
1680 return localpeer(
1681 self, path=path, remotehidden=remotehidden
1681 self, path=path, remotehidden=remotehidden
1682 ) # not cached to avoid reference cycle
1682 ) # not cached to avoid reference cycle
1683
1683
1684 def unfiltered(self):
1684 def unfiltered(self):
1685 """Return unfiltered version of the repository
1685 """Return unfiltered version of the repository
1686
1686
1687 Intended to be overwritten by filtered repo."""
1687 Intended to be overwritten by filtered repo."""
1688 return self
1688 return self
1689
1689
1690 def filtered(self, name, visibilityexceptions=None):
1690 def filtered(self, name, visibilityexceptions=None):
1691 """Return a filtered version of a repository
1691 """Return a filtered version of a repository
1692
1692
1693 The `name` parameter is the identifier of the requested view. This
1693 The `name` parameter is the identifier of the requested view. This
1694 will return a repoview object set "exactly" to the specified view.
1694 will return a repoview object set "exactly" to the specified view.
1695
1695
1696 This function does not apply recursive filtering to a repository. For
1696 This function does not apply recursive filtering to a repository. For
1697 example calling `repo.filtered("served")` will return a repoview using
1697 example calling `repo.filtered("served")` will return a repoview using
1698 the "served" view, regardless of the initial view used by `repo`.
1698 the "served" view, regardless of the initial view used by `repo`.
1699
1699
1700 In other words, there is always only one level of `repoview` "filtering".
1700 In other words, there is always only one level of `repoview` "filtering".
1701 """
1701 """
1702 if self._extrafilterid is not None and b'%' not in name:
1702 if self._extrafilterid is not None and b'%' not in name:
1703 name = name + b'%' + self._extrafilterid
1703 name = name + b'%' + self._extrafilterid
1704
1704
1705 cls = repoview.newtype(self.unfiltered().__class__)
1705 cls = repoview.newtype(self.unfiltered().__class__)
1706 return cls(self, name, visibilityexceptions)
1706 return cls(self, name, visibilityexceptions)
1707
1707
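For illustration, a minimal sketch of how a caller might request views; `repo` is assumed to be an existing localrepository instance obtained elsewhere, which this listing does not show:

    served = repo.filtered(b'served')      # repoview pinned to the "served" view
    visible = served.filtered(b'visible')  # not nested: still a single level of filtering
    unfi = visible.unfiltered()            # back to the unfiltered repository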
1708 @mixedrepostorecache(
1708 @mixedrepostorecache(
1709 (b'bookmarks', b'plain'),
1709 (b'bookmarks', b'plain'),
1710 (b'bookmarks.current', b'plain'),
1710 (b'bookmarks.current', b'plain'),
1711 (b'bookmarks', b''),
1711 (b'bookmarks', b''),
1712 (b'00changelog.i', b''),
1712 (b'00changelog.i', b''),
1713 )
1713 )
1714 def _bookmarks(self):
1714 def _bookmarks(self):
1715 # Since the multiple files involved in the transaction cannot be
1715 # Since the multiple files involved in the transaction cannot be
1716 # written atomically (with the current repository format), there is a race
1716 # written atomically (with the current repository format), there is a race
1717 # condition here.
1717 # condition here.
1718 #
1718 #
1719 # 1) changelog content A is read
1719 # 1) changelog content A is read
1720 # 2) outside transaction update changelog to content B
1720 # 2) outside transaction update changelog to content B
1721 # 3) outside transaction update bookmark file referring to content B
1721 # 3) outside transaction update bookmark file referring to content B
1722 # 4) bookmarks file content is read and filtered against changelog-A
1722 # 4) bookmarks file content is read and filtered against changelog-A
1723 #
1723 #
1724 # When this happens, bookmarks against nodes missing from A are dropped.
1724 # When this happens, bookmarks against nodes missing from A are dropped.
1725 #
1725 #
1726 # Having this happening during read is not great, but it becomes worse
1726 # Having this happening during read is not great, but it becomes worse
1727 # when it happens during write, because the bookmarks to the "unknown"
1727 # when it happens during write, because the bookmarks to the "unknown"
1728 # nodes will be dropped for good. However, writes happen within locks.
1728 # nodes will be dropped for good. However, writes happen within locks.
1729 # This locking makes it possible to have a race-free, consistent read.
1729 # This locking makes it possible to have a race-free, consistent read.
1730 # For this purpose, data read from disk before locking is
1730 # For this purpose, data read from disk before locking is
1731 # "invalidated" right after the locks are taken. These invalidations are
1731 # "invalidated" right after the locks are taken. These invalidations are
1732 # "light": the `filecache` mechanism keeps the data in memory and will
1732 # "light": the `filecache` mechanism keeps the data in memory and will
1733 # reuse it if the underlying files did not change. Not parsing the
1733 # reuse it if the underlying files did not change. Not parsing the
1734 # same data multiple times helps performance.
1734 # same data multiple times helps performance.
1735 #
1735 #
1736 # Unfortunately, in the case described above, the files tracked by the
1736 # Unfortunately, in the case described above, the files tracked by the
1737 # bookmarks file cache might not have changed, but the in-memory
1737 # bookmarks file cache might not have changed, but the in-memory
1738 # content is still "wrong" because we used an older changelog content
1738 # content is still "wrong" because we used an older changelog content
1739 # to process the on-disk data. So after locking, the changelog would be
1739 # to process the on-disk data. So after locking, the changelog would be
1740 # refreshed but `_bookmarks` would be preserved.
1740 # refreshed but `_bookmarks` would be preserved.
1741 # Adding `00changelog.i` to the list of tracked files is not
1741 # Adding `00changelog.i` to the list of tracked files is not
1742 # enough, because at the time we build the content for `_bookmarks` in
1742 # enough, because at the time we build the content for `_bookmarks` in
1743 # (4), the changelog file has already diverged from the content used
1743 # (4), the changelog file has already diverged from the content used
1744 # for loading `changelog` in (1)
1744 # for loading `changelog` in (1)
1745 #
1745 #
1746 # To prevent the issue, we force the changelog to be explicitly
1746 # To prevent the issue, we force the changelog to be explicitly
1747 # reloaded while computing `_bookmarks`. The data race can still happen
1747 # reloaded while computing `_bookmarks`. The data race can still happen
1748 # without the lock (with a narrower window), but it would no longer go
1748 # without the lock (with a narrower window), but it would no longer go
1749 # undetected during the lock time refresh.
1749 # undetected during the lock time refresh.
1750 #
1750 #
1751 # The new schedule is as follows:
1751 # The new schedule is as follows:
1752 #
1752 #
1753 # 1) filecache logic detects that `_bookmarks` needs to be computed
1753 # 1) filecache logic detects that `_bookmarks` needs to be computed
1754 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1754 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1755 # 3) We force `changelog` filecache to be tested
1755 # 3) We force `changelog` filecache to be tested
1756 # 4) cachestat for `changelog` are captured (for changelog)
1756 # 4) cachestat for `changelog` are captured (for changelog)
1757 # 5) `_bookmarks` is computed and cached
1757 # 5) `_bookmarks` is computed and cached
1758 #
1758 #
1759 # The step in (3) ensures we have a changelog at least as recent as the
1759 # The step in (3) ensures we have a changelog at least as recent as the
1760 # cache stat computed in (1). As a result at locking time:
1760 # cache stat computed in (1). As a result at locking time:
1761 # * if the changelog did not change since (1) -> we can reuse the data
1761 # * if the changelog did not change since (1) -> we can reuse the data
1762 # * otherwise -> the bookmarks get refreshed.
1762 # * otherwise -> the bookmarks get refreshed.
1763 self._refreshchangelog()
1763 self._refreshchangelog()
1764 return bookmarks.bmstore(self)
1764 return bookmarks.bmstore(self)
1765
1765
1766 def _refreshchangelog(self):
1766 def _refreshchangelog(self):
1767 """make sure the in memory changelog match the on-disk one"""
1767 """make sure the in memory changelog match the on-disk one"""
1768 if 'changelog' in vars(self) and self.currenttransaction() is None:
1768 if 'changelog' in vars(self) and self.currenttransaction() is None:
1769 del self.changelog
1769 del self.changelog
1770
1770
1771 @property
1771 @property
1772 def _activebookmark(self):
1772 def _activebookmark(self):
1773 return self._bookmarks.active
1773 return self._bookmarks.active
1774
1774
1775 # _phasesets depend on changelog. what we need is to call
1775 # _phasesets depend on changelog. what we need is to call
1776 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1776 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1777 # can't be easily expressed in filecache mechanism.
1777 # can't be easily expressed in filecache mechanism.
1778 @storecache(b'phaseroots', b'00changelog.i')
1778 @storecache(b'phaseroots', b'00changelog.i')
1779 def _phasecache(self):
1779 def _phasecache(self):
1780 return phases.phasecache(self, self._phasedefaults)
1780 return phases.phasecache(self, self._phasedefaults)
1781
1781
1782 @storecache(b'obsstore')
1782 @storecache(b'obsstore')
1783 def obsstore(self):
1783 def obsstore(self):
1784 return obsolete.makestore(self.ui, self)
1784 return obsolete.makestore(self.ui, self)
1785
1785
1786 @changelogcache()
1786 @changelogcache()
1787 def changelog(repo):
1787 def changelog(repo):
1788 # load dirstate before changelog to avoid race see issue6303
1788 # load dirstate before changelog to avoid race see issue6303
1789 repo.dirstate.prefetch_parents()
1789 repo.dirstate.prefetch_parents()
1790 return repo.store.changelog(
1790 return repo.store.changelog(
1791 txnutil.mayhavepending(repo.root),
1791 txnutil.mayhavepending(repo.root),
1792 concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
1792 concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
1793 )
1793 )
1794
1794
1795 @manifestlogcache()
1795 @manifestlogcache()
1796 def manifestlog(self):
1796 def manifestlog(self):
1797 return self.store.manifestlog(self, self._storenarrowmatch)
1797 return self.store.manifestlog(self, self._storenarrowmatch)
1798
1798
1799 @unfilteredpropertycache
1799 @unfilteredpropertycache
1800 def dirstate(self):
1800 def dirstate(self):
1801 if self._dirstate is None:
1801 if self._dirstate is None:
1802 self._dirstate = self._makedirstate()
1802 self._dirstate = self._makedirstate()
1803 else:
1803 else:
1804 self._dirstate.refresh()
1804 self._dirstate.refresh()
1805 return self._dirstate
1805 return self._dirstate
1806
1806
1807 def _makedirstate(self):
1807 def _makedirstate(self):
1808 """Extension point for wrapping the dirstate per-repo."""
1808 """Extension point for wrapping the dirstate per-repo."""
1809 sparsematchfn = None
1809 sparsematchfn = None
1810 if sparse.use_sparse(self):
1810 if sparse.use_sparse(self):
1811 sparsematchfn = lambda: sparse.matcher(self)
1811 sparsematchfn = lambda: sparse.matcher(self)
1812 v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
1812 v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
1813 th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
1813 th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
1814 use_dirstate_v2 = v2_req in self.requirements
1814 use_dirstate_v2 = v2_req in self.requirements
1815 use_tracked_hint = th in self.requirements
1815 use_tracked_hint = th in self.requirements
1816
1816
1817 return dirstate.dirstate(
1817 return dirstate.dirstate(
1818 self.vfs,
1818 self.vfs,
1819 self.ui,
1819 self.ui,
1820 self.root,
1820 self.root,
1821 self._dirstatevalidate,
1821 self._dirstatevalidate,
1822 sparsematchfn,
1822 sparsematchfn,
1823 self.nodeconstants,
1823 self.nodeconstants,
1824 use_dirstate_v2,
1824 use_dirstate_v2,
1825 use_tracked_hint=use_tracked_hint,
1825 use_tracked_hint=use_tracked_hint,
1826 )
1826 )
1827
1827
1828 def _dirstatevalidate(self, node):
1828 def _dirstatevalidate(self, node):
1829 okay = True
1829 okay = True
1830 try:
1830 try:
1831 self.changelog.rev(node)
1831 self.changelog.rev(node)
1832 except error.LookupError:
1832 except error.LookupError:
1833 # If the parents are unknown, it might just be because the changelog
1833 # If the parents are unknown, it might just be because the changelog
1834 # in memory is lagging behind the dirstate in memory. So try to
1834 # in memory is lagging behind the dirstate in memory. So try to
1835 # refresh the changelog first.
1835 # refresh the changelog first.
1836 #
1836 #
1837 # We only do so if we don't hold the lock; if we do hold the lock,
1837 # We only do so if we don't hold the lock; if we do hold the lock,
1838 # the invalidation at that time should have taken care of this and
1838 # the invalidation at that time should have taken care of this and
1839 # something is very fishy.
1839 # something is very fishy.
1840 if self.currentlock() is None:
1840 if self.currentlock() is None:
1841 self.invalidate()
1841 self.invalidate()
1842 try:
1842 try:
1843 self.changelog.rev(node)
1843 self.changelog.rev(node)
1844 except error.LookupError:
1844 except error.LookupError:
1845 okay = False
1845 okay = False
1846 else:
1846 else:
1847 # XXX we should consider raising an error here.
1847 # XXX we should consider raising an error here.
1848 okay = False
1848 okay = False
1849 if okay:
1849 if okay:
1850 return node
1850 return node
1851 else:
1851 else:
1852 if not self._dirstatevalidatewarned:
1852 if not self._dirstatevalidatewarned:
1853 self._dirstatevalidatewarned = True
1853 self._dirstatevalidatewarned = True
1854 self.ui.warn(
1854 self.ui.warn(
1855 _(b"warning: ignoring unknown working parent %s!\n")
1855 _(b"warning: ignoring unknown working parent %s!\n")
1856 % short(node)
1856 % short(node)
1857 )
1857 )
1858 return self.nullid
1858 return self.nullid
1859
1859
1860 @storecache(narrowspec.FILENAME)
1860 @storecache(narrowspec.FILENAME)
1861 def narrowpats(self):
1861 def narrowpats(self):
1862 """matcher patterns for this repository's narrowspec
1862 """matcher patterns for this repository's narrowspec
1863
1863
1864 A tuple of (includes, excludes).
1864 A tuple of (includes, excludes).
1865 """
1865 """
1866 # the narrow management should probably move into its own object
1866 # the narrow management should probably move into its own object
1867 val = self._pending_narrow_pats
1867 val = self._pending_narrow_pats
1868 if val is None:
1868 if val is None:
1869 val = narrowspec.load(self)
1869 val = narrowspec.load(self)
1870 return val
1870 return val
1871
1871
1872 @storecache(narrowspec.FILENAME)
1872 @storecache(narrowspec.FILENAME)
1873 def _storenarrowmatch(self):
1873 def _storenarrowmatch(self):
1874 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1874 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1875 return matchmod.always()
1875 return matchmod.always()
1876 include, exclude = self.narrowpats
1876 include, exclude = self.narrowpats
1877 return narrowspec.match(self.root, include=include, exclude=exclude)
1877 return narrowspec.match(self.root, include=include, exclude=exclude)
1878
1878
1879 @storecache(narrowspec.FILENAME)
1879 @storecache(narrowspec.FILENAME)
1880 def _narrowmatch(self):
1880 def _narrowmatch(self):
1881 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1881 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1882 return matchmod.always()
1882 return matchmod.always()
1883 narrowspec.checkworkingcopynarrowspec(self)
1883 narrowspec.checkworkingcopynarrowspec(self)
1884 include, exclude = self.narrowpats
1884 include, exclude = self.narrowpats
1885 return narrowspec.match(self.root, include=include, exclude=exclude)
1885 return narrowspec.match(self.root, include=include, exclude=exclude)
1886
1886
1887 def narrowmatch(self, match=None, includeexact=False):
1887 def narrowmatch(self, match=None, includeexact=False):
1888 """matcher corresponding the the repo's narrowspec
1888 """matcher corresponding the the repo's narrowspec
1889
1889
1890 If `match` is given, then that will be intersected with the narrow
1890 If `match` is given, then that will be intersected with the narrow
1891 matcher.
1891 matcher.
1892
1892
1893 If `includeexact` is True, then any exact matches from `match` will
1893 If `includeexact` is True, then any exact matches from `match` will
1894 be included even if they're outside the narrowspec.
1894 be included even if they're outside the narrowspec.
1895 """
1895 """
1896 if match:
1896 if match:
1897 if includeexact and not self._narrowmatch.always():
1897 if includeexact and not self._narrowmatch.always():
1898 # do not exclude explicitly-specified paths so that they can
1898 # do not exclude explicitly-specified paths so that they can
1899 # be warned later on
1899 # be warned later on
1900 em = matchmod.exact(match.files())
1900 em = matchmod.exact(match.files())
1901 nm = matchmod.unionmatcher([self._narrowmatch, em])
1901 nm = matchmod.unionmatcher([self._narrowmatch, em])
1902 return matchmod.intersectmatchers(match, nm)
1902 return matchmod.intersectmatchers(match, nm)
1903 return matchmod.intersectmatchers(match, self._narrowmatch)
1903 return matchmod.intersectmatchers(match, self._narrowmatch)
1904 return self._narrowmatch
1904 return self._narrowmatch
1905
1905
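As a rough usage sketch (the path is hypothetical, `repo` is assumed to exist, and matcher objects are assumed to be callable on repo-relative paths as elsewhere in Mercurial):

    m = repo.narrowmatch()          # matcher for the repository's narrowspec
    if m(b'src/widget/core.py'):    # hypothetical repo-relative path
        pass                        # the file is inside the narrow clone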
1906 def setnarrowpats(self, newincludes, newexcludes):
1906 def setnarrowpats(self, newincludes, newexcludes):
1907 narrowspec.save(self, newincludes, newexcludes)
1907 narrowspec.save(self, newincludes, newexcludes)
1908 self.invalidate(clearfilecache=True)
1908 self.invalidate(clearfilecache=True)
1909
1909
1910 @unfilteredpropertycache
1910 @unfilteredpropertycache
1911 def _quick_access_changeid_null(self):
1911 def _quick_access_changeid_null(self):
1912 return {
1912 return {
1913 b'null': (nullrev, self.nodeconstants.nullid),
1913 b'null': (nullrev, self.nodeconstants.nullid),
1914 nullrev: (nullrev, self.nodeconstants.nullid),
1914 nullrev: (nullrev, self.nodeconstants.nullid),
1915 self.nullid: (nullrev, self.nullid),
1915 self.nullid: (nullrev, self.nullid),
1916 }
1916 }
1917
1917
1918 @unfilteredpropertycache
1918 @unfilteredpropertycache
1919 def _quick_access_changeid_wc(self):
1919 def _quick_access_changeid_wc(self):
1920 # also fast path access to the working copy parents
1920 # also fast path access to the working copy parents
1921 # however, only do it for filters that ensure the wc is visible.
1921 # however, only do it for filters that ensure the wc is visible.
1922 quick = self._quick_access_changeid_null.copy()
1922 quick = self._quick_access_changeid_null.copy()
1923 cl = self.unfiltered().changelog
1923 cl = self.unfiltered().changelog
1924 for node in self.dirstate.parents():
1924 for node in self.dirstate.parents():
1925 if node == self.nullid:
1925 if node == self.nullid:
1926 continue
1926 continue
1927 rev = cl.index.get_rev(node)
1927 rev = cl.index.get_rev(node)
1928 if rev is None:
1928 if rev is None:
1929 # unknown working copy parent case:
1929 # unknown working copy parent case:
1930 #
1930 #
1931 # skip the fast path and let higher code deal with it
1931 # skip the fast path and let higher code deal with it
1932 continue
1932 continue
1933 pair = (rev, node)
1933 pair = (rev, node)
1934 quick[rev] = pair
1934 quick[rev] = pair
1935 quick[node] = pair
1935 quick[node] = pair
1936 # also add the parents of the parents
1936 # also add the parents of the parents
1937 for r in cl.parentrevs(rev):
1937 for r in cl.parentrevs(rev):
1938 if r == nullrev:
1938 if r == nullrev:
1939 continue
1939 continue
1940 n = cl.node(r)
1940 n = cl.node(r)
1941 pair = (r, n)
1941 pair = (r, n)
1942 quick[r] = pair
1942 quick[r] = pair
1943 quick[n] = pair
1943 quick[n] = pair
1944 p1node = self.dirstate.p1()
1944 p1node = self.dirstate.p1()
1945 if p1node != self.nullid:
1945 if p1node != self.nullid:
1946 quick[b'.'] = quick[p1node]
1946 quick[b'.'] = quick[p1node]
1947 return quick
1947 return quick
1948
1948
1949 @unfilteredmethod
1949 @unfilteredmethod
1950 def _quick_access_changeid_invalidate(self):
1950 def _quick_access_changeid_invalidate(self):
1951 if '_quick_access_changeid_wc' in vars(self):
1951 if '_quick_access_changeid_wc' in vars(self):
1952 del self.__dict__['_quick_access_changeid_wc']
1952 del self.__dict__['_quick_access_changeid_wc']
1953
1953
1954 @property
1954 @property
1955 def _quick_access_changeid(self):
1955 def _quick_access_changeid(self):
1956 """an helper dictionnary for __getitem__ calls
1956 """an helper dictionnary for __getitem__ calls
1957
1957
1958 This contains a list of symbols we can recognise right away without
1958 This contains a list of symbols we can recognise right away without
1959 further processing.
1959 further processing.
1960 """
1960 """
1961 if self.filtername in repoview.filter_has_wc:
1961 if self.filtername in repoview.filter_has_wc:
1962 return self._quick_access_changeid_wc
1962 return self._quick_access_changeid_wc
1963 return self._quick_access_changeid_null
1963 return self._quick_access_changeid_null
1964
1964
1965 def __getitem__(self, changeid):
1965 def __getitem__(self, changeid):
1966 # dealing with special cases
1966 # dealing with special cases
1967 if changeid is None:
1967 if changeid is None:
1968 return context.workingctx(self)
1968 return context.workingctx(self)
1969 if isinstance(changeid, context.basectx):
1969 if isinstance(changeid, context.basectx):
1970 return changeid
1970 return changeid
1971
1971
1972 # dealing with multiple revisions
1972 # dealing with multiple revisions
1973 if isinstance(changeid, slice):
1973 if isinstance(changeid, slice):
1974 # wdirrev isn't contiguous so the slice shouldn't include it
1974 # wdirrev isn't contiguous so the slice shouldn't include it
1975 return [
1975 return [
1976 self[i]
1976 self[i]
1977 for i in range(*changeid.indices(len(self)))
1977 for i in range(*changeid.indices(len(self)))
1978 if i not in self.changelog.filteredrevs
1978 if i not in self.changelog.filteredrevs
1979 ]
1979 ]
1980
1980
1981 # dealing with some special values
1981 # dealing with some special values
1982 quick_access = self._quick_access_changeid.get(changeid)
1982 quick_access = self._quick_access_changeid.get(changeid)
1983 if quick_access is not None:
1983 if quick_access is not None:
1984 rev, node = quick_access
1984 rev, node = quick_access
1985 return context.changectx(self, rev, node, maybe_filtered=False)
1985 return context.changectx(self, rev, node, maybe_filtered=False)
1986 if changeid == b'tip':
1986 if changeid == b'tip':
1987 node = self.changelog.tip()
1987 node = self.changelog.tip()
1988 rev = self.changelog.rev(node)
1988 rev = self.changelog.rev(node)
1989 return context.changectx(self, rev, node)
1989 return context.changectx(self, rev, node)
1990
1990
1991 # dealing with arbitrary values
1991 # dealing with arbitrary values
1992 try:
1992 try:
1993 if isinstance(changeid, int):
1993 if isinstance(changeid, int):
1994 node = self.changelog.node(changeid)
1994 node = self.changelog.node(changeid)
1995 rev = changeid
1995 rev = changeid
1996 elif changeid == b'.':
1996 elif changeid == b'.':
1997 # this is a hack to delay/avoid loading obsmarkers
1997 # this is a hack to delay/avoid loading obsmarkers
1998 # when we know that '.' won't be hidden
1998 # when we know that '.' won't be hidden
1999 node = self.dirstate.p1()
1999 node = self.dirstate.p1()
2000 rev = self.unfiltered().changelog.rev(node)
2000 rev = self.unfiltered().changelog.rev(node)
2001 elif len(changeid) == self.nodeconstants.nodelen:
2001 elif len(changeid) == self.nodeconstants.nodelen:
2002 try:
2002 try:
2003 node = changeid
2003 node = changeid
2004 rev = self.changelog.rev(changeid)
2004 rev = self.changelog.rev(changeid)
2005 except error.FilteredLookupError:
2005 except error.FilteredLookupError:
2006 changeid = hex(changeid) # for the error message
2006 changeid = hex(changeid) # for the error message
2007 raise
2007 raise
2008 except LookupError:
2008 except LookupError:
2009 # check if it might have come from damaged dirstate
2009 # check if it might have come from damaged dirstate
2010 #
2010 #
2011 # XXX we could avoid the unfiltered if we had a recognizable
2011 # XXX we could avoid the unfiltered if we had a recognizable
2012 # exception for filtered changeset access
2012 # exception for filtered changeset access
2013 if (
2013 if (
2014 self.local()
2014 self.local()
2015 and changeid in self.unfiltered().dirstate.parents()
2015 and changeid in self.unfiltered().dirstate.parents()
2016 ):
2016 ):
2017 msg = _(b"working directory has unknown parent '%s'!")
2017 msg = _(b"working directory has unknown parent '%s'!")
2018 raise error.Abort(msg % short(changeid))
2018 raise error.Abort(msg % short(changeid))
2019 changeid = hex(changeid) # for the error message
2019 changeid = hex(changeid) # for the error message
2020 raise
2020 raise
2021
2021
2022 elif len(changeid) == 2 * self.nodeconstants.nodelen:
2022 elif len(changeid) == 2 * self.nodeconstants.nodelen:
2023 node = bin(changeid)
2023 node = bin(changeid)
2024 rev = self.changelog.rev(node)
2024 rev = self.changelog.rev(node)
2025 else:
2025 else:
2026 raise error.ProgrammingError(
2026 raise error.ProgrammingError(
2027 b"unsupported changeid '%s' of type %s"
2027 b"unsupported changeid '%s' of type %s"
2028 % (changeid, pycompat.bytestr(type(changeid)))
2028 % (changeid, pycompat.bytestr(type(changeid)))
2029 )
2029 )
2030
2030
2031 return context.changectx(self, rev, node)
2031 return context.changectx(self, rev, node)
2032
2032
2033 except (error.FilteredIndexError, error.FilteredLookupError):
2033 except (error.FilteredIndexError, error.FilteredLookupError):
2034 raise error.FilteredRepoLookupError(
2034 raise error.FilteredRepoLookupError(
2035 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
2035 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
2036 )
2036 )
2037 except (IndexError, LookupError):
2037 except (IndexError, LookupError):
2038 raise error.RepoLookupError(
2038 raise error.RepoLookupError(
2039 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
2039 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
2040 )
2040 )
2041 except error.WdirUnsupported:
2041 except error.WdirUnsupported:
2042 return context.workingctx(self)
2042 return context.workingctx(self)
2043
2043
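A hedged sketch of the lookup forms handled by ``__getitem__`` above (assuming an existing `repo` with at least a few revisions):

    wctx = repo[None]      # working directory context
    tipctx = repo[b'tip']  # symbolic name handled explicitly above
    first = repo[0]        # integer revision number
    parent = repo[b'.']    # first working-copy parent, fast-pathed when cached
    ctxs = repo[0:3]       # slice of changectx objects, skipping filtered revisions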
2044 def __contains__(self, changeid):
2044 def __contains__(self, changeid):
2045 """True if the given changeid exists"""
2045 """True if the given changeid exists"""
2046 try:
2046 try:
2047 self[changeid]
2047 self[changeid]
2048 return True
2048 return True
2049 except error.RepoLookupError:
2049 except error.RepoLookupError:
2050 return False
2050 return False
2051
2051
2052 def __nonzero__(self):
2052 def __nonzero__(self):
2053 return True
2053 return True
2054
2054
2055 __bool__ = __nonzero__
2055 __bool__ = __nonzero__
2056
2056
2057 def __len__(self):
2057 def __len__(self):
2058 # no need to pay the cost of repoview.changelog
2058 # no need to pay the cost of repoview.changelog
2059 unfi = self.unfiltered()
2059 unfi = self.unfiltered()
2060 return len(unfi.changelog)
2060 return len(unfi.changelog)
2061
2061
2062 def __iter__(self):
2062 def __iter__(self):
2063 return iter(self.changelog)
2063 return iter(self.changelog)
2064
2064
2065 def revs(self, expr: bytes, *args):
2065 def revs(self, expr: bytes, *args):
2066 """Find revisions matching a revset.
2066 """Find revisions matching a revset.
2067
2067
2068 The revset is specified as a string ``expr`` that may contain
2068 The revset is specified as a string ``expr`` that may contain
2069 %-formatting to escape certain types. See ``revsetlang.formatspec``.
2069 %-formatting to escape certain types. See ``revsetlang.formatspec``.
2070
2070
2071 Revset aliases from the configuration are not expanded. To expand
2071 Revset aliases from the configuration are not expanded. To expand
2072 user aliases, consider calling ``scmutil.revrange()`` or
2072 user aliases, consider calling ``scmutil.revrange()`` or
2073 ``repo.anyrevs([expr], user=True)``.
2073 ``repo.anyrevs([expr], user=True)``.
2074
2074
2075 Returns a smartset.abstractsmartset, which is a list-like interface
2075 Returns a smartset.abstractsmartset, which is a list-like interface
2076 that contains integer revisions.
2076 that contains integer revisions.
2077 """
2077 """
2078 tree = revsetlang.spectree(expr, *args)
2078 tree = revsetlang.spectree(expr, *args)
2079 return revset.makematcher(tree)(self)
2079 return revset.makematcher(tree)(self)
2080
2080
2081 def set(self, expr: bytes, *args):
2081 def set(self, expr: bytes, *args):
2082 """Find revisions matching a revset and emit changectx instances.
2082 """Find revisions matching a revset and emit changectx instances.
2083
2083
2084 This is a convenience wrapper around ``revs()`` that iterates the
2084 This is a convenience wrapper around ``revs()`` that iterates the
2085 result and is a generator of changectx instances.
2085 result and is a generator of changectx instances.
2086
2086
2087 Revset aliases from the configuration are not expanded. To expand
2087 Revset aliases from the configuration are not expanded. To expand
2088 user aliases, consider calling ``scmutil.revrange()``.
2088 user aliases, consider calling ``scmutil.revrange()``.
2089 """
2089 """
2090 for r in self.revs(expr, *args):
2090 for r in self.revs(expr, *args):
2091 yield self[r]
2091 yield self[r]
2092
2092
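For example, a small sketch combining ``revs()`` and ``set()``; the revset expressions are illustrative only and assume the repository has at least a handful of revisions:

    for rev in repo.revs(b'ancestors(%d)', 5):         # smartset of integer revisions
        pass
    for ctx in repo.set(b'head() and not closed()'):   # changectx instances
        name = ctx.branch()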
2093 def anyrevs(self, specs: bytes, user=False, localalias=None):
2093 def anyrevs(self, specs: bytes, user=False, localalias=None):
2094 """Find revisions matching one of the given revsets.
2094 """Find revisions matching one of the given revsets.
2095
2095
2096 Revset aliases from the configuration are not expanded by default. To
2096 Revset aliases from the configuration are not expanded by default. To
2097 expand user aliases, specify ``user=True``. To provide some local
2097 expand user aliases, specify ``user=True``. To provide some local
2098 definitions overriding user aliases, set ``localalias`` to
2098 definitions overriding user aliases, set ``localalias`` to
2099 ``{name: definitionstring}``.
2099 ``{name: definitionstring}``.
2100 """
2100 """
2101 if specs == [b'null']:
2101 if specs == [b'null']:
2102 return revset.baseset([nullrev])
2102 return revset.baseset([nullrev])
2103 if specs == [b'.']:
2103 if specs == [b'.']:
2104 quick_data = self._quick_access_changeid.get(b'.')
2104 quick_data = self._quick_access_changeid.get(b'.')
2105 if quick_data is not None:
2105 if quick_data is not None:
2106 return revset.baseset([quick_data[0]])
2106 return revset.baseset([quick_data[0]])
2107 if user:
2107 if user:
2108 m = revset.matchany(
2108 m = revset.matchany(
2109 self.ui,
2109 self.ui,
2110 specs,
2110 specs,
2111 lookup=revset.lookupfn(self),
2111 lookup=revset.lookupfn(self),
2112 localalias=localalias,
2112 localalias=localalias,
2113 )
2113 )
2114 else:
2114 else:
2115 m = revset.matchany(None, specs, localalias=localalias)
2115 m = revset.matchany(None, specs, localalias=localalias)
2116 return m(self)
2116 return m(self)
2117
2117
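And a hedged sketch of ``anyrevs()`` with a local alias overriding user configuration; the alias name and definition are made up for the example:

    revs = repo.anyrevs(
        [b'releases'],
        user=True,
        localalias={b'releases': b'tag() and public()'},
    )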
2118 def url(self) -> bytes:
2118 def url(self) -> bytes:
2119 return b'file:' + self.root
2119 return b'file:' + self.root
2120
2120
2121 def hook(self, name, throw=False, **args):
2121 def hook(self, name, throw=False, **args):
2122 """Call a hook, passing this repo instance.
2122 """Call a hook, passing this repo instance.
2123
2123
2124 This is a convenience method to aid invoking hooks. Extensions likely
2124 This is a convenience method to aid invoking hooks. Extensions likely
2125 won't call this unless they have registered a custom hook or are
2125 won't call this unless they have registered a custom hook or are
2126 replacing code that is expected to call a hook.
2126 replacing code that is expected to call a hook.
2127 """
2127 """
2128 return hook.hook(self.ui, self, name, throw, **args)
2128 return hook.hook(self.ui, self, name, throw, **args)
2129
2129
2130 @filteredpropertycache
2130 @filteredpropertycache
2131 def _tagscache(self):
2131 def _tagscache(self):
2132 """Returns a tagscache object that contains various tags related
2132 """Returns a tagscache object that contains various tags related
2133 caches."""
2133 caches."""
2134
2134
2135 # This simplifies its cache management by having one decorated
2135 # This simplifies its cache management by having one decorated
2136 # function (this one) and the rest simply fetch things from it.
2136 # function (this one) and the rest simply fetch things from it.
2137 class tagscache:
2137 class tagscache:
2138 def __init__(self):
2138 def __init__(self):
2139 # These two define the set of tags for this repository. tags
2139 # These two define the set of tags for this repository. tags
2140 # maps tag name to node; tagtypes maps tag name to 'global' or
2140 # maps tag name to node; tagtypes maps tag name to 'global' or
2141 # 'local'. (Global tags are defined by .hgtags across all
2141 # 'local'. (Global tags are defined by .hgtags across all
2142 # heads, and local tags are defined in .hg/localtags.)
2142 # heads, and local tags are defined in .hg/localtags.)
2143 # They constitute the in-memory cache of tags.
2143 # They constitute the in-memory cache of tags.
2144 self.tags = self.tagtypes = None
2144 self.tags = self.tagtypes = None
2145
2145
2146 self.nodetagscache = self.tagslist = None
2146 self.nodetagscache = self.tagslist = None
2147
2147
2148 cache = tagscache()
2148 cache = tagscache()
2149 cache.tags, cache.tagtypes = self._findtags()
2149 cache.tags, cache.tagtypes = self._findtags()
2150
2150
2151 return cache
2151 return cache
2152
2152
2153 def tags(self):
2153 def tags(self):
2154 '''return a mapping of tag to node'''
2154 '''return a mapping of tag to node'''
2155 t = {}
2155 t = {}
2156 if self.changelog.filteredrevs:
2156 if self.changelog.filteredrevs:
2157 tags, tt = self._findtags()
2157 tags, tt = self._findtags()
2158 else:
2158 else:
2159 tags = self._tagscache.tags
2159 tags = self._tagscache.tags
2160 rev = self.changelog.rev
2160 rev = self.changelog.rev
2161 for k, v in tags.items():
2161 for k, v in tags.items():
2162 try:
2162 try:
2163 # ignore tags to unknown nodes
2163 # ignore tags to unknown nodes
2164 rev(v)
2164 rev(v)
2165 t[k] = v
2165 t[k] = v
2166 except (error.LookupError, ValueError):
2166 except (error.LookupError, ValueError):
2167 pass
2167 pass
2168 return t
2168 return t
2169
2169
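For instance, a sketch iterating the filtered tag mapping together with ``tagtype()`` and ``nodetags()`` (assuming an existing `repo`):

    for name, node in repo.tags().items():
        kind = repo.tagtype(name)    # b'global', b'local', or None (see tagtype below)
        names = repo.nodetags(node)  # every tag pointing at this node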
2170 def _findtags(self):
2170 def _findtags(self):
2171 """Do the hard work of finding tags. Return a pair of dicts
2171 """Do the hard work of finding tags. Return a pair of dicts
2172 (tags, tagtypes) where tags maps tag name to node, and tagtypes
2172 (tags, tagtypes) where tags maps tag name to node, and tagtypes
2173 maps tag name to a string like \'global\' or \'local\'.
2173 maps tag name to a string like \'global\' or \'local\'.
2174 Subclasses or extensions are free to add their own tags, but
2174 Subclasses or extensions are free to add their own tags, but
2175 should be aware that the returned dicts will be retained for the
2175 should be aware that the returned dicts will be retained for the
2176 duration of the localrepo object."""
2176 duration of the localrepo object."""
2177
2177
2178 # XXX what tagtype should subclasses/extensions use? Currently
2178 # XXX what tagtype should subclasses/extensions use? Currently
2179 # mq and bookmarks add tags, but do not set the tagtype at all.
2179 # mq and bookmarks add tags, but do not set the tagtype at all.
2180 # Should each extension invent its own tag type? Should there
2180 # Should each extension invent its own tag type? Should there
2181 # be one tagtype for all such "virtual" tags? Or is the status
2181 # be one tagtype for all such "virtual" tags? Or is the status
2182 # quo fine?
2182 # quo fine?
2183
2183
2184 # map tag name to (node, hist)
2184 # map tag name to (node, hist)
2185 alltags = tagsmod.findglobaltags(self.ui, self)
2185 alltags = tagsmod.findglobaltags(self.ui, self)
2186 # map tag name to tag type
2186 # map tag name to tag type
2187 tagtypes = {tag: b'global' for tag in alltags}
2187 tagtypes = {tag: b'global' for tag in alltags}
2188
2188
2189 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
2189 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
2190
2190
2191 # Build the return dicts. Have to re-encode tag names because
2191 # Build the return dicts. Have to re-encode tag names because
2192 # the tags module always uses UTF-8 (in order not to lose info
2192 # the tags module always uses UTF-8 (in order not to lose info
2193 # writing to the cache), but the rest of Mercurial wants them in
2193 # writing to the cache), but the rest of Mercurial wants them in
2194 # local encoding.
2194 # local encoding.
2195 tags = {}
2195 tags = {}
2196 for name, (node, hist) in alltags.items():
2196 for name, (node, hist) in alltags.items():
2197 if node != self.nullid:
2197 if node != self.nullid:
2198 tags[encoding.tolocal(name)] = node
2198 tags[encoding.tolocal(name)] = node
2199 tags[b'tip'] = self.changelog.tip()
2199 tags[b'tip'] = self.changelog.tip()
2200 tagtypes = {
2200 tagtypes = {
2201 encoding.tolocal(name): value for (name, value) in tagtypes.items()
2201 encoding.tolocal(name): value for (name, value) in tagtypes.items()
2202 }
2202 }
2203 return (tags, tagtypes)
2203 return (tags, tagtypes)
2204
2204
2205 def tagtype(self, tagname):
2205 def tagtype(self, tagname):
2206 """
2206 """
2207 return the type of the given tag. result can be:
2207 return the type of the given tag. result can be:
2208
2208
2209 'local' : a local tag
2209 'local' : a local tag
2210 'global' : a global tag
2210 'global' : a global tag
2211 None : tag does not exist
2211 None : tag does not exist
2212 """
2212 """
2213
2213
2214 return self._tagscache.tagtypes.get(tagname)
2214 return self._tagscache.tagtypes.get(tagname)
2215
2215
2216 def tagslist(self):
2216 def tagslist(self):
2217 '''return a list of tags ordered by revision'''
2217 '''return a list of tags ordered by revision'''
2218 if not self._tagscache.tagslist:
2218 if not self._tagscache.tagslist:
2219 l = []
2219 l = []
2220 for t, n in self.tags().items():
2220 for t, n in self.tags().items():
2221 l.append((self.changelog.rev(n), t, n))
2221 l.append((self.changelog.rev(n), t, n))
2222 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
2222 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
2223
2223
2224 return self._tagscache.tagslist
2224 return self._tagscache.tagslist
2225
2225
2226 def nodetags(self, node):
2226 def nodetags(self, node):
2227 '''return the tags associated with a node'''
2227 '''return the tags associated with a node'''
2228 if not self._tagscache.nodetagscache:
2228 if not self._tagscache.nodetagscache:
2229 nodetagscache = {}
2229 nodetagscache = {}
2230 for t, n in self._tagscache.tags.items():
2230 for t, n in self._tagscache.tags.items():
2231 nodetagscache.setdefault(n, []).append(t)
2231 nodetagscache.setdefault(n, []).append(t)
2232 for tags in nodetagscache.values():
2232 for tags in nodetagscache.values():
2233 tags.sort()
2233 tags.sort()
2234 self._tagscache.nodetagscache = nodetagscache
2234 self._tagscache.nodetagscache = nodetagscache
2235 return self._tagscache.nodetagscache.get(node, [])
2235 return self._tagscache.nodetagscache.get(node, [])
2236
2236
2237 def nodebookmarks(self, node):
2237 def nodebookmarks(self, node):
2238 """return the list of bookmarks pointing to the specified node"""
2238 """return the list of bookmarks pointing to the specified node"""
2239 return self._bookmarks.names(node)
2239 return self._bookmarks.names(node)
2240
2240
2241 def branchmap(self):
2241 def branchmap(self):
2242 """returns a dictionary {branch: [branchheads]} with branchheads
2242 """returns a dictionary {branch: [branchheads]} with branchheads
2243 ordered by increasing revision number"""
2243 ordered by increasing revision number"""
2244 return self._branchcaches[self]
2244 return self._branchcaches[self]
2245
2245
2246 @unfilteredmethod
2246 @unfilteredmethod
2247 def revbranchcache(self):
2247 def revbranchcache(self):
2248 if not self._revbranchcache:
2248 if not self._revbranchcache:
2249 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
2249 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
2250 return self._revbranchcache
2250 return self._revbranchcache
2251
2251
2252 def register_changeset(self, rev, changelogrevision):
2252 def register_changeset(self, rev, changelogrevision):
2253 self.revbranchcache().setdata(rev, changelogrevision)
2253 self.revbranchcache().setdata(rev, changelogrevision)
2254
2254
2255 def branchtip(self, branch, ignoremissing=False):
2255 def branchtip(self, branch, ignoremissing=False):
2256 """return the tip node for a given branch
2256 """return the tip node for a given branch
2257
2257
2258 If ignoremissing is True, then this method will not raise an error.
2258 If ignoremissing is True, then this method will not raise an error.
2259 This is helpful for callers that only expect None for a missing branch
2259 This is helpful for callers that only expect None for a missing branch
2260 (e.g. namespace).
2260 (e.g. namespace).
2261
2261
2262 """
2262 """
2263 try:
2263 try:
2264 return self.branchmap().branchtip(branch)
2264 return self.branchmap().branchtip(branch)
2265 except KeyError:
2265 except KeyError:
2266 if not ignoremissing:
2266 if not ignoremissing:
2267 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
2267 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
2268 else:
2268 else:
2269 pass
2269 pass
2270
2270
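A small usage sketch (the branch name is hypothetical):

    node = repo.branchtip(b'stable', ignoremissing=True)
    if node is None:
        pass  # the branch does not exist; no RepoLookupError was raised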
2271 def lookup(self, key):
2271 def lookup(self, key):
2272 node = scmutil.revsymbol(self, key).node()
2272 node = scmutil.revsymbol(self, key).node()
2273 if node is None:
2273 if node is None:
2274 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
2274 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
2275 return node
2275 return node
2276
2276
2277 def lookupbranch(self, key):
2277 def lookupbranch(self, key):
2278 if self.branchmap().hasbranch(key):
2278 if self.branchmap().hasbranch(key):
2279 return key
2279 return key
2280
2280
2281 return scmutil.revsymbol(self, key).branch()
2281 return scmutil.revsymbol(self, key).branch()
2282
2282
2283 def known(self, nodes):
2283 def known(self, nodes):
2284 cl = self.changelog
2284 cl = self.changelog
2285 get_rev = cl.index.get_rev
2285 get_rev = cl.index.get_rev
2286 filtered = cl.filteredrevs
2286 filtered = cl.filteredrevs
2287 result = []
2287 result = []
2288 for n in nodes:
2288 for n in nodes:
2289 r = get_rev(n)
2289 r = get_rev(n)
2290 resp = not (r is None or r in filtered)
2290 resp = not (r is None or r in filtered)
2291 result.append(resp)
2291 result.append(resp)
2292 return result
2292 return result
2293
2293
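For example (a sketch; the second node is a made-up placeholder that is not expected to exist):

    nodes = [repo[b'tip'].node(), b'\x01' * 20]
    present = repo.known(nodes)  # one boolean per node; filtered revisions count as unknown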
2294 def local(self):
2294 def local(self):
2295 return self
2295 return self
2296
2296
2297 def publishing(self):
2297 def publishing(self):
2298 # it's safe (and desirable) to trust the publish flag unconditionally
2298 # it's safe (and desirable) to trust the publish flag unconditionally
2299 # so that we don't finalize changes shared between users via ssh or nfs
2299 # so that we don't finalize changes shared between users via ssh or nfs
2300 return self.ui.configbool(b'phases', b'publish', untrusted=True)
2300 return self.ui.configbool(b'phases', b'publish', untrusted=True)
2301
2301
2302 def cancopy(self):
2302 def cancopy(self):
2303 # so statichttprepo's override of local() works
2303 # so statichttprepo's override of local() works
2304 if not self.local():
2304 if not self.local():
2305 return False
2305 return False
2306 if not self.publishing():
2306 if not self.publishing():
2307 return True
2307 return True
2308 # if publishing we can't copy if there is filtered content
2308 # if publishing we can't copy if there is filtered content
2309 return not self.filtered(b'visible').changelog.filteredrevs
2309 return not self.filtered(b'visible').changelog.filteredrevs
2310
2310
2311 def shared(self):
2311 def shared(self):
2312 '''the type of shared repository (None if not shared)'''
2312 '''the type of shared repository (None if not shared)'''
2313 if self.sharedpath != self.path:
2313 if self.sharedpath != self.path:
2314 return b'store'
2314 return b'store'
2315 return None
2315 return None
2316
2316
2317 def wjoin(self, f: bytes, *insidef: bytes) -> bytes:
2317 def wjoin(self, f: bytes, *insidef: bytes) -> bytes:
2318 return self.vfs.reljoin(self.root, f, *insidef)
2318 return self.vfs.reljoin(self.root, f, *insidef)
2319
2319
2320 def setparents(self, p1, p2=None):
2320 def setparents(self, p1, p2=None):
2321 if p2 is None:
2321 if p2 is None:
2322 p2 = self.nullid
2322 p2 = self.nullid
2323 self[None].setparents(p1, p2)
2323 self[None].setparents(p1, p2)
2324 self._quick_access_changeid_invalidate()
2324 self._quick_access_changeid_invalidate()
2325
2325
2326 def filectx(self, path: bytes, changeid=None, fileid=None, changectx=None):
2326 def filectx(self, path: bytes, changeid=None, fileid=None, changectx=None):
2327 """changeid must be a changeset revision, if specified.
2327 """changeid must be a changeset revision, if specified.
2328 fileid can be a file revision or node."""
2328 fileid can be a file revision or node."""
2329 return context.filectx(
2329 return context.filectx(
2330 self, path, changeid, fileid, changectx=changectx
2330 self, path, changeid, fileid, changectx=changectx
2331 )
2331 )
2332
2332
2333 def getcwd(self) -> bytes:
2333 def getcwd(self) -> bytes:
2334 return self.dirstate.getcwd()
2334 return self.dirstate.getcwd()
2335
2335
2336 def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes:
2336 def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes:
2337 return self.dirstate.pathto(f, cwd)
2337 return self.dirstate.pathto(f, cwd)
2338
2338
2339 def _loadfilter(self, filter):
2339 def _loadfilter(self, filter):
2340 if filter not in self._filterpats:
2340 if filter not in self._filterpats:
2341 l = []
2341 l = []
2342 for pat, cmd in self.ui.configitems(filter):
2342 for pat, cmd in self.ui.configitems(filter):
2343 if cmd == b'!':
2343 if cmd == b'!':
2344 continue
2344 continue
2345 mf = matchmod.match(self.root, b'', [pat])
2345 mf = matchmod.match(self.root, b'', [pat])
2346 fn = None
2346 fn = None
2347 params = cmd
2347 params = cmd
2348 for name, filterfn in self._datafilters.items():
2348 for name, filterfn in self._datafilters.items():
2349 if cmd.startswith(name):
2349 if cmd.startswith(name):
2350 fn = filterfn
2350 fn = filterfn
2351 params = cmd[len(name) :].lstrip()
2351 params = cmd[len(name) :].lstrip()
2352 break
2352 break
2353 if not fn:
2353 if not fn:
2354 fn = lambda s, c, **kwargs: procutil.filter(s, c)
2354 fn = lambda s, c, **kwargs: procutil.filter(s, c)
2355 fn.__name__ = 'commandfilter'
2355 fn.__name__ = 'commandfilter'
2356 # Wrap old filters not supporting keyword arguments
2356 # Wrap old filters not supporting keyword arguments
2357 if not pycompat.getargspec(fn)[2]:
2357 if not pycompat.getargspec(fn)[2]:
2358 oldfn = fn
2358 oldfn = fn
2359 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
2359 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
2360 fn.__name__ = 'compat-' + oldfn.__name__
2360 fn.__name__ = 'compat-' + oldfn.__name__
2361 l.append((mf, fn, params))
2361 l.append((mf, fn, params))
2362 self._filterpats[filter] = l
2362 self._filterpats[filter] = l
2363 return self._filterpats[filter]
2363 return self._filterpats[filter]
2364
2364
2365 def _filter(self, filterpats, filename, data):
2365 def _filter(self, filterpats, filename, data):
2366 for mf, fn, cmd in filterpats:
2366 for mf, fn, cmd in filterpats:
2367 if mf(filename):
2367 if mf(filename):
2368 self.ui.debug(
2368 self.ui.debug(
2369 b"filtering %s through %s\n"
2369 b"filtering %s through %s\n"
2370 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2370 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2371 )
2371 )
2372 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2372 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2373 break
2373 break
2374
2374
2375 return data
2375 return data
2376
2376
2377 @unfilteredpropertycache
2377 @unfilteredpropertycache
2378 def _encodefilterpats(self):
2378 def _encodefilterpats(self):
2379 return self._loadfilter(b'encode')
2379 return self._loadfilter(b'encode')
2380
2380
2381 @unfilteredpropertycache
2381 @unfilteredpropertycache
2382 def _decodefilterpats(self):
2382 def _decodefilterpats(self):
2383 return self._loadfilter(b'decode')
2383 return self._loadfilter(b'decode')
2384
2384
2385 def adddatafilter(self, name, filter):
2385 def adddatafilter(self, name, filter):
2386 self._datafilters[name] = filter
2386 self._datafilters[name] = filter
2387
2387
2388 def wread(self, filename: bytes) -> bytes:
2388 def wread(self, filename: bytes) -> bytes:
2389 if self.wvfs.islink(filename):
2389 if self.wvfs.islink(filename):
2390 data = self.wvfs.readlink(filename)
2390 data = self.wvfs.readlink(filename)
2391 else:
2391 else:
2392 data = self.wvfs.read(filename)
2392 data = self.wvfs.read(filename)
2393 return self._filter(self._encodefilterpats, filename, data)
2393 return self._filter(self._encodefilterpats, filename, data)
2394
2394
2395 def wwrite(
2395 def wwrite(
2396 self,
2396 self,
2397 filename: bytes,
2397 filename: bytes,
2398 data: bytes,
2398 data: bytes,
2399 flags: bytes,
2399 flags: bytes,
2400 backgroundclose=False,
2400 backgroundclose=False,
2401 **kwargs,
2401 **kwargs,
2402 ) -> int:
2402 ) -> int:
2403 """write ``data`` into ``filename`` in the working directory
2403 """write ``data`` into ``filename`` in the working directory
2404
2404
2405 This returns length of written (maybe decoded) data.
2405 This returns length of written (maybe decoded) data.
2406 """
2406 """
2407 data = self._filter(self._decodefilterpats, filename, data)
2407 data = self._filter(self._decodefilterpats, filename, data)
2408 if b'l' in flags:
2408 if b'l' in flags:
2409 self.wvfs.symlink(data, filename)
2409 self.wvfs.symlink(data, filename)
2410 else:
2410 else:
2411 self.wvfs.write(
2411 self.wvfs.write(
2412 filename, data, backgroundclose=backgroundclose, **kwargs
2412 filename, data, backgroundclose=backgroundclose, **kwargs
2413 )
2413 )
2414 if b'x' in flags:
2414 if b'x' in flags:
2415 self.wvfs.setflags(filename, False, True)
2415 self.wvfs.setflags(filename, False, True)
2416 else:
2416 else:
2417 self.wvfs.setflags(filename, False, False)
2417 self.wvfs.setflags(filename, False, False)
2418 return len(data)
2418 return len(data)
2419
2419
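A hedged sketch of the flag handling above (file names and contents are illustrative; real callers typically hold the working-copy lock):

    repo.wwrite(b'notes.txt', b'hello\n', b'')       # regular file
    repo.wwrite(b'run.sh', b'#!/bin/sh\n', b'x')     # executable bit set
    repo.wwrite(b'latest', b'notes.txt', b'l')       # symlink pointing at notes.txt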
2420 def wwritedata(self, filename: bytes, data: bytes) -> bytes:
2420 def wwritedata(self, filename: bytes, data: bytes) -> bytes:
2421 return self._filter(self._decodefilterpats, filename, data)
2421 return self._filter(self._decodefilterpats, filename, data)
2422
2422
2423 def currenttransaction(self):
2423 def currenttransaction(self):
2424 """return the current transaction or None if non exists"""
2424 """return the current transaction or None if non exists"""
2425 if self._transref:
2425 if self._transref:
2426 tr = self._transref()
2426 tr = self._transref()
2427 else:
2427 else:
2428 tr = None
2428 tr = None
2429
2429
2430 if tr and tr.running():
2430 if tr and tr.running():
2431 return tr
2431 return tr
2432 return None
2432 return None
2433
2433
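For example, a sketch of checking for an open transaction before deciding when to write; the callback category is made up, and ``addfinalize`` is assumed from the transaction API:

    tr = repo.currenttransaction()
    if tr is None:
        pass  # nothing running: safe to write immediately
    else:
        tr.addfinalize(b'example-cache-write', lambda tr: None)  # defer to transaction close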
2434 def transaction(self, desc, report=None):
2434 def transaction(self, desc, report=None):
2435 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2435 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2436 b'devel', b'check-locks'
2436 b'devel', b'check-locks'
2437 ):
2437 ):
2438 if self._currentlock(self._lockref) is None:
2438 if self._currentlock(self._lockref) is None:
2439 raise error.ProgrammingError(b'transaction requires locking')
2439 raise error.ProgrammingError(b'transaction requires locking')
2440 tr = self.currenttransaction()
2440 tr = self.currenttransaction()
2441 if tr is not None:
2441 if tr is not None:
2442 return tr.nest(name=desc)
2442 return tr.nest(name=desc)
2443
2443
2444 # abort here if the journal already exists
2444 # abort here if the journal already exists
2445 if self.svfs.exists(b"journal"):
2445 if self.svfs.exists(b"journal"):
2446 raise error.RepoError(
2446 raise error.RepoError(
2447 _(b"abandoned transaction found"),
2447 _(b"abandoned transaction found"),
2448 hint=_(b"run 'hg recover' to clean up transaction"),
2448 hint=_(b"run 'hg recover' to clean up transaction"),
2449 )
2449 )
2450
2450
2451 # At that point your dirstate should be clean:
2451 # At that point your dirstate should be clean:
2452 #
2452 #
2453 # - If you don't have the wlock, why would you still have a dirty
2453 # - If you don't have the wlock, why would you still have a dirty
2454 # dirstate?
2454 # dirstate?
2455 #
2455 #
2456 # - If you hold the wlock, you should not be opening a transaction in
2456 # - If you hold the wlock, you should not be opening a transaction in
2457 # the middle of a `dirstate.changing_*` block. The transaction needs to
2457 # the middle of a `dirstate.changing_*` block. The transaction needs to
2458 # be open before that and wrap the change-context.
2458 # be open before that and wrap the change-context.
2459 #
2459 #
2460 # - If you are not within a `dirstate.changing_*` context, why is our
2460 # - If you are not within a `dirstate.changing_*` context, why is our
2461 # dirstate dirty?
2461 # dirstate dirty?
2462 if self.dirstate._dirty:
2462 if self.dirstate._dirty:
2463 m = "cannot open a transaction with a dirty dirstate"
2463 m = "cannot open a transaction with a dirty dirstate"
2464 raise error.ProgrammingError(m)
2464 raise error.ProgrammingError(m)
2465
2465
2466 idbase = b"%.40f#%f" % (random.random(), time.time())
2466 idbase = b"%.40f#%f" % (random.random(), time.time())
2467 ha = hex(hashutil.sha1(idbase).digest())
2467 ha = hex(hashutil.sha1(idbase).digest())
2468 txnid = b'TXN:' + ha
2468 txnid = b'TXN:' + ha
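# [editor's note] Illustrative sketch (standard-library hashlib standing in for
# mercurial's hashutil; the helper name is an assumption): how a transaction id of
# the b'TXN:<sha1 hex>' shape built above can be derived.
import hashlib
import random
import time

def make_txnid():
    idbase = b"%.40f#%f" % (random.random(), time.time())
    return b'TXN:' + hashlib.sha1(idbase).hexdigest().encode('ascii')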
2469 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2469 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2470
2470
2471 self._writejournal(desc)
2471 self._writejournal(desc)
2472 if report:
2472 if report:
2473 rp = report
2473 rp = report
2474 else:
2474 else:
2475 rp = self.ui.warn
2475 rp = self.ui.warn
2476 vfsmap = self.vfs_map
2476 vfsmap = self.vfs_map
2477 # we must avoid cyclic reference between repo and transaction.
2477 # we must avoid cyclic reference between repo and transaction.
2478 reporef = weakref.ref(self)
2478 reporef = weakref.ref(self)
2479 # Code to track tag movement
2479 # Code to track tag movement
2480 #
2480 #
2481 # Since tags are all handled as file content, it is actually quite hard
2481 # Since tags are all handled as file content, it is actually quite hard
2482 # to track these movements from a code perspective. So we fall back to
2482 # to track these movements from a code perspective. So we fall back to
2483 # tracking at the repository level. One could envision tracking changes
2483 # tracking at the repository level. One could envision tracking changes
2484 # to the '.hgtags' file through changegroup apply, but that fails to
2484 # to the '.hgtags' file through changegroup apply, but that fails to
2485 # cope with cases where a transaction exposes new heads without a
2485 # cope with cases where a transaction exposes new heads without a
2486 # changegroup being involved (eg: phase movement).
2486 # changegroup being involved (eg: phase movement).
2487 #
2487 #
2488 # For now, we gate the feature behind a flag since this likely comes
2488 # For now, we gate the feature behind a flag since this likely comes
2489 # with performance impacts. The current code runs more often than needed
2489 # with performance impacts. The current code runs more often than needed
2490 # and does not use caches as much as it could. The current focus is on
2490 # and does not use caches as much as it could. The current focus is on
2491 # the behavior of the feature so we disable it by default. The flag
2491 # the behavior of the feature so we disable it by default. The flag
2492 # will be removed when we are happy with the performance impact.
2492 # will be removed when we are happy with the performance impact.
2493 #
2493 #
2494 # Once this feature is no longer experimental move the following
2494 # Once this feature is no longer experimental move the following
2495 # documentation to the appropriate help section:
2495 # documentation to the appropriate help section:
2496 #
2496 #
2497 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2497 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2498 # tags (new or changed or deleted tags). In addition the details of
2498 # tags (new or changed or deleted tags). In addition the details of
2499 # these changes are made available in a file at:
2499 # these changes are made available in a file at:
2500 # ``REPOROOT/.hg/changes/tags.changes``.
2500 # ``REPOROOT/.hg/changes/tags.changes``.
2501 # Make sure you check for HG_TAG_MOVED before reading that file as it
2501 # Make sure you check for HG_TAG_MOVED before reading that file as it
2502 # might exist from a previous transaction even if no tags were touched
2502 # might exist from a previous transaction even if no tags were touched
2503 # in this one. Changes are recorded in a line-based format::
2503 # in this one. Changes are recorded in a line-based format::
2504 #
2504 #
2505 # <action> <hex-node> <tag-name>\n
2505 # <action> <hex-node> <tag-name>\n
2506 #
2506 #
2507 # Actions are defined as follows:
2507 # Actions are defined as follows:
2508 # "-R": tag is removed,
2508 # "-R": tag is removed,
2509 # "+A": tag is added,
2509 # "+A": tag is added,
2510 # "-M": tag is moved (old value),
2510 # "-M": tag is moved (old value),
2511 # "+M": tag is moved (new value),
2511 # "+M": tag is moved (new value),
2512 tracktags = lambda x: None
2512 tracktags = lambda x: None
2513 # experimental config: experimental.hook-track-tags
2513 # experimental config: experimental.hook-track-tags
2514 shouldtracktags = self.ui.configbool(
2514 shouldtracktags = self.ui.configbool(
2515 b'experimental', b'hook-track-tags'
2515 b'experimental', b'hook-track-tags'
2516 )
2516 )
2517 if desc != b'strip' and shouldtracktags:
2517 if desc != b'strip' and shouldtracktags:
2518 oldheads = self.changelog.headrevs()
2518 oldheads = self.changelog.headrevs()
2519
2519
2520 def tracktags(tr2):
2520 def tracktags(tr2):
2521 repo = reporef()
2521 repo = reporef()
2522 assert repo is not None # help pytype
2522 assert repo is not None # help pytype
2523 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2523 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2524 newheads = repo.changelog.headrevs()
2524 newheads = repo.changelog.headrevs()
2525 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2525 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2526 # note: we compare lists here.
2526 # note: we compare lists here.
2527 # As we do it only once, building sets would not be cheaper
2527 # As we do it only once, building sets would not be cheaper
2528 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2528 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2529 if changes:
2529 if changes:
2530 tr2.hookargs[b'tag_moved'] = b'1'
2530 tr2.hookargs[b'tag_moved'] = b'1'
2531 with repo.vfs(
2531 with repo.vfs(
2532 b'changes/tags.changes', b'w', atomictemp=True
2532 b'changes/tags.changes', b'w', atomictemp=True
2533 ) as changesfile:
2533 ) as changesfile:
2534 # note: we do not register the file to the transaction
2534 # note: we do not register the file to the transaction
2535 # because we need it to still exist when the transaction
2535 # because we need it to still exist when the transaction
2536 # is closed (for txnclose hooks)
2536 # is closed (for txnclose hooks)
2537 tagsmod.writediff(changesfile, changes)
2537 tagsmod.writediff(changesfile, changes)
2538
2538
2539 def validate(tr2):
2539 def validate(tr2):
2540 """will run pre-closing hooks"""
2540 """will run pre-closing hooks"""
2541 # XXX the transaction API is a bit lacking here so we take a hacky
2541 # XXX the transaction API is a bit lacking here so we take a hacky
2542 # path for now
2542 # path for now
2543 #
2543 #
2544 # We cannot add this as a "pending" hook since the 'tr.hookargs'
2544 # We cannot add this as a "pending" hook since the 'tr.hookargs'
2545 # dict is copied before these run. In addition we need the data
2545 # dict is copied before these run. In addition we need the data
2546 # available to in-memory hooks too.
2546 # available to in-memory hooks too.
2547 #
2547 #
2548 # Moreover, we also need to make sure this runs before txnclose
2548 # Moreover, we also need to make sure this runs before txnclose
2549 # hooks and there is no "pending" mechanism that would execute
2549 # hooks and there is no "pending" mechanism that would execute
2550 # logic only if hooks are about to run.
2550 # logic only if hooks are about to run.
2551 #
2551 #
2552 # Fixing this limitation of the transaction is also needed to track
2552 # Fixing this limitation of the transaction is also needed to track
2553 # other families of changes (bookmarks, phases, obsolescence).
2553 # other families of changes (bookmarks, phases, obsolescence).
2554 #
2554 #
2555 # This will have to be fixed before we remove the experimental
2555 # This will have to be fixed before we remove the experimental
2556 # gating.
2556 # gating.
2557 tracktags(tr2)
2557 tracktags(tr2)
2558 repo = reporef()
2558 repo = reporef()
2559 assert repo is not None # help pytype
2559 assert repo is not None # help pytype
2560
2560
2561 singleheadopt = (b'experimental', b'single-head-per-branch')
2561 singleheadopt = (b'experimental', b'single-head-per-branch')
2562 singlehead = repo.ui.configbool(*singleheadopt)
2562 singlehead = repo.ui.configbool(*singleheadopt)
2563 if singlehead:
2563 if singlehead:
2564 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2564 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2565 accountclosed = singleheadsub.get(
2565 accountclosed = singleheadsub.get(
2566 b"account-closed-heads", False
2566 b"account-closed-heads", False
2567 )
2567 )
2568 if singleheadsub.get(b"public-changes-only", False):
2568 if singleheadsub.get(b"public-changes-only", False):
2569 filtername = b"immutable"
2569 filtername = b"immutable"
2570 else:
2570 else:
2571 filtername = b"visible"
2571 filtername = b"visible"
2572 scmutil.enforcesinglehead(
2572 scmutil.enforcesinglehead(
2573 repo, tr2, desc, accountclosed, filtername
2573 repo, tr2, desc, accountclosed, filtername
2574 )
2574 )
2575 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2575 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2576 for name, (old, new) in sorted(
2576 for name, (old, new) in sorted(
2577 tr.changes[b'bookmarks'].items()
2577 tr.changes[b'bookmarks'].items()
2578 ):
2578 ):
2579 args = tr.hookargs.copy()
2579 args = tr.hookargs.copy()
2580 args.update(bookmarks.preparehookargs(name, old, new))
2580 args.update(bookmarks.preparehookargs(name, old, new))
2581 repo.hook(
2581 repo.hook(
2582 b'pretxnclose-bookmark',
2582 b'pretxnclose-bookmark',
2583 throw=True,
2583 throw=True,
2584 **pycompat.strkwargs(args),
2584 **pycompat.strkwargs(args),
2585 )
2585 )
2586 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2586 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2587 cl = repo.unfiltered().changelog
2587 cl = repo.unfiltered().changelog
2588 for revs, (old, new) in tr.changes[b'phases']:
2588 for revs, (old, new) in tr.changes[b'phases']:
2589 for rev in revs:
2589 for rev in revs:
2590 args = tr.hookargs.copy()
2590 args = tr.hookargs.copy()
2591 node = hex(cl.node(rev))
2591 node = hex(cl.node(rev))
2592 args.update(phases.preparehookargs(node, old, new))
2592 args.update(phases.preparehookargs(node, old, new))
2593 repo.hook(
2593 repo.hook(
2594 b'pretxnclose-phase',
2594 b'pretxnclose-phase',
2595 throw=True,
2595 throw=True,
2596 **pycompat.strkwargs(args),
2596 **pycompat.strkwargs(args),
2597 )
2597 )
2598
2598
2599 repo.hook(
2599 repo.hook(
2600 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2600 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2601 )
2601 )
2602
2602
2603 def releasefn(tr, success):
2603 def releasefn(tr, success):
2604 repo = reporef()
2604 repo = reporef()
2605 if repo is None:
2605 if repo is None:
2606 # If the repo has been GC'd (and this release function is being
2606 # If the repo has been GC'd (and this release function is being
2607 # called from transaction.__del__), there's not much we can do,
2607 # called from transaction.__del__), there's not much we can do,
2608 # so just leave the unfinished transaction there and let the
2608 # so just leave the unfinished transaction there and let the
2609 # user run `hg recover`.
2609 # user run `hg recover`.
2610 return
2610 return
2611 if success:
2611 if success:
2612 # this should be explicitly invoked here, because
2612 # this should be explicitly invoked here, because
2613 # in-memory changes aren't written out when closing the
2613 # in-memory changes aren't written out when closing the
2614 # transaction, if tr.addfilegenerator (via
2614 # transaction, if tr.addfilegenerator (via
2615 # dirstate.write or so) isn't invoked while
2615 # dirstate.write or so) isn't invoked while
2616 # the transaction is running
2616 # the transaction is running
2617 repo.dirstate.write(None)
2617 repo.dirstate.write(None)
2618 else:
2618 else:
2619 # discard all changes (including ones already written
2619 # discard all changes (including ones already written
2620 # out) in this transaction
2620 # out) in this transaction
2621 repo.invalidate(clearfilecache=True)
2621 repo.invalidate(clearfilecache=True)
2622
2622
2623 tr = transaction.transaction(
2623 tr = transaction.transaction(
2624 rp,
2624 rp,
2625 self.svfs,
2625 self.svfs,
2626 vfsmap,
2626 vfsmap,
2627 b"journal",
2627 b"journal",
2628 b"undo",
2628 b"undo",
2629 lambda: None,
2629 lambda: None,
2630 self.store.createmode,
2630 self.store.createmode,
2631 validator=validate,
2631 validator=validate,
2632 releasefn=releasefn,
2632 releasefn=releasefn,
2633 checkambigfiles=_cachedfiles,
2633 checkambigfiles=_cachedfiles,
2634 name=desc,
2634 name=desc,
2635 )
2635 )
2636 for vfs_id, path in self._journalfiles():
2636 for vfs_id, path in self._journalfiles():
2637 tr.add_journal(vfs_id, path)
2637 tr.add_journal(vfs_id, path)
2638 tr.changes[b'origrepolen'] = len(self)
2638 tr.changes[b'origrepolen'] = len(self)
2639 tr.changes[b'obsmarkers'] = set()
2639 tr.changes[b'obsmarkers'] = set()
2640 tr.changes[b'phases'] = []
2640 tr.changes[b'phases'] = []
2641 tr.changes[b'bookmarks'] = {}
2641 tr.changes[b'bookmarks'] = {}
2642
2642
2643 tr.hookargs[b'txnid'] = txnid
2643 tr.hookargs[b'txnid'] = txnid
2644 tr.hookargs[b'txnname'] = desc
2644 tr.hookargs[b'txnname'] = desc
2645 tr.hookargs[b'changes'] = tr.changes
2645 tr.hookargs[b'changes'] = tr.changes
2646 # note: writing the fncache only during finalize means that the file is
2646 # note: writing the fncache only during finalize means that the file is
2647 # outdated when running hooks. As fncache is used for streaming clone,
2647 # outdated when running hooks. As fncache is used for streaming clone,
2648 # this is not expected to break anything that happens during the hooks.
2648 # this is not expected to break anything that happens during the hooks.
2649 tr.addfinalize(b'flush-fncache', self.store.write)
2649 tr.addfinalize(b'flush-fncache', self.store.write)
2650
2650
2651 def txnclosehook(tr2):
2651 def txnclosehook(tr2):
2652 """To be run if transaction is successful, will schedule a hook run"""
2652 """To be run if transaction is successful, will schedule a hook run"""
2653 # Don't reference tr2 in hook() so we don't hold a reference.
2653 # Don't reference tr2 in hook() so we don't hold a reference.
2654 # This reduces memory consumption when there are multiple
2654 # This reduces memory consumption when there are multiple
2655 # transactions per lock. This can likely go away if issue5045
2655 # transactions per lock. This can likely go away if issue5045
2656 # fixes the function accumulation.
2656 # fixes the function accumulation.
2657 hookargs = tr2.hookargs
2657 hookargs = tr2.hookargs
2658
2658
2659 def hookfunc(unused_success):
2659 def hookfunc(unused_success):
2660 repo = reporef()
2660 repo = reporef()
2661 assert repo is not None # help pytype
2661 assert repo is not None # help pytype
2662
2662
2663 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2663 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2664 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2664 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2665 for name, (old, new) in bmchanges:
2665 for name, (old, new) in bmchanges:
2666 args = tr.hookargs.copy()
2666 args = tr.hookargs.copy()
2667 args.update(bookmarks.preparehookargs(name, old, new))
2667 args.update(bookmarks.preparehookargs(name, old, new))
2668 repo.hook(
2668 repo.hook(
2669 b'txnclose-bookmark',
2669 b'txnclose-bookmark',
2670 throw=False,
2670 throw=False,
2671 **pycompat.strkwargs(args),
2671 **pycompat.strkwargs(args),
2672 )
2672 )
2673
2673
2674 if hook.hashook(repo.ui, b'txnclose-phase'):
2674 if hook.hashook(repo.ui, b'txnclose-phase'):
2675 cl = repo.unfiltered().changelog
2675 cl = repo.unfiltered().changelog
2676 phasemv = sorted(
2676 phasemv = sorted(
2677 tr.changes[b'phases'], key=lambda r: r[0][0]
2677 tr.changes[b'phases'], key=lambda r: r[0][0]
2678 )
2678 )
2679 for revs, (old, new) in phasemv:
2679 for revs, (old, new) in phasemv:
2680 for rev in revs:
2680 for rev in revs:
2681 args = tr.hookargs.copy()
2681 args = tr.hookargs.copy()
2682 node = hex(cl.node(rev))
2682 node = hex(cl.node(rev))
2683 args.update(phases.preparehookargs(node, old, new))
2683 args.update(phases.preparehookargs(node, old, new))
2684 repo.hook(
2684 repo.hook(
2685 b'txnclose-phase',
2685 b'txnclose-phase',
2686 throw=False,
2686 throw=False,
2687 **pycompat.strkwargs(args),
2687 **pycompat.strkwargs(args),
2688 )
2688 )
2689
2689
2690 repo.hook(
2690 repo.hook(
2691 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2691 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2692 )
2692 )
2693
2693
2694 repo = reporef()
2694 repo = reporef()
2695 assert repo is not None # help pytype
2695 assert repo is not None # help pytype
2696 repo._afterlock(hookfunc)
2696 repo._afterlock(hookfunc)
2697
2697
2698 tr.addfinalize(b'txnclose-hook', txnclosehook)
2698 tr.addfinalize(b'txnclose-hook', txnclosehook)
2699 # Include a leading "-" to make it happen before the transaction summary
2699 # Include a leading "-" to make it happen before the transaction summary
2700 # reports registered via scmutil.registersummarycallback() whose names
2700 # reports registered via scmutil.registersummarycallback() whose names
2701 # are 00-txnreport etc. That way, the caches will be warm when the
2701 # are 00-txnreport etc. That way, the caches will be warm when the
2702 # callbacks run.
2702 # callbacks run.
2703 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2703 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2704
2704
2705 def txnaborthook(tr2):
2705 def txnaborthook(tr2):
2706 """To be run if transaction is aborted"""
2706 """To be run if transaction is aborted"""
2707 repo = reporef()
2707 repo = reporef()
2708 assert repo is not None # help pytype
2708 assert repo is not None # help pytype
2709 repo.hook(
2709 repo.hook(
2710 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2710 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2711 )
2711 )
2712
2712
2713 tr.addabort(b'txnabort-hook', txnaborthook)
2713 tr.addabort(b'txnabort-hook', txnaborthook)
2714 # avoid eager cache invalidation. in-memory data should be identical
2714 # avoid eager cache invalidation. in-memory data should be identical
2715 # to stored data if transaction has no error.
2715 # to stored data if transaction has no error.
2716 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2716 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2717 self._transref = weakref.ref(tr)
2717 self._transref = weakref.ref(tr)
2718 scmutil.registersummarycallback(self, tr, desc)
2718 scmutil.registersummarycallback(self, tr, desc)
2719 # This only exists to deal with rollback's need to have viable
2719 # This only exists to deal with rollback's need to have viable
2720 # parents at the end of the operation. So back up viable parents at the
2720 # parents at the end of the operation. So back up viable parents at the
2721 # time of this operation.
2721 # time of this operation.
2722 #
2722 #
2723 # We only do it when the `wlock` is taken, otherwise others might be
2723 # We only do it when the `wlock` is taken, otherwise others might be
2724 # altering the dirstate under us.
2724 # altering the dirstate under us.
2725 #
2725 #
2726 # This is really not a great way to do this (first, because we cannot
2726 # This is really not a great way to do this (first, because we cannot
2727 # always do it). There are more viable alternatives that exist:
2727 # always do it). There are more viable alternatives that exist:
2728 #
2728 #
2729 # - backing up only the working copy parents in a dedicated file and doing
2729 # - backing up only the working copy parents in a dedicated file and doing
2730 # a clean "keep-update" to them on `hg rollback`.
2730 # a clean "keep-update" to them on `hg rollback`.
2731 #
2731 #
2732 # - slightly changing the behavior and applying logic similar to "hg
2732 # - slightly changing the behavior and applying logic similar to "hg
2733 # strip" to pick a working copy destination on `hg rollback`
2733 # strip" to pick a working copy destination on `hg rollback`
2734 if self.currentwlock() is not None:
2734 if self.currentwlock() is not None:
2735 ds = self.dirstate
2735 ds = self.dirstate
2736 if not self.vfs.exists(b'branch'):
2736 if not self.vfs.exists(b'branch'):
2737 # force a file to be written if none exists
2737 # force a file to be written if none exists
2738 ds.setbranch(b'default', None)
2738 ds.setbranch(b'default', None)
2739
2739
2740 def backup_dirstate(tr):
2740 def backup_dirstate(tr):
2741 for f in ds.all_file_names():
2741 for f in ds.all_file_names():
2742 # hardlink backup is okay because `dirstate` is always
2742 # hardlink backup is okay because `dirstate` is always
2743 # atomically written and possible data files are append-only
2743 # atomically written and possible data files are append-only
2744 # and resistant to trailing data.
2744 # and resistant to trailing data.
2745 tr.addbackup(f, hardlink=True, location=b'plain')
2745 tr.addbackup(f, hardlink=True, location=b'plain')
2746
2746
2747 tr.addvalidator(b'dirstate-backup', backup_dirstate)
2747 tr.addvalidator(b'dirstate-backup', backup_dirstate)
2748 return tr
2748 return tr
2749
2749
2750 def _journalfiles(self):
2750 def _journalfiles(self):
2751 return (
2751 return (
2752 (self.svfs, b'journal'),
2752 (self.svfs, b'journal'),
2753 (self.vfs, b'journal.desc'),
2753 (self.vfs, b'journal.desc'),
2754 )
2754 )
2755
2755
2756 def undofiles(self):
2756 def undofiles(self):
2757 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2757 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2758
2758
2759 @unfilteredmethod
2759 @unfilteredmethod
2760 def _writejournal(self, desc):
2760 def _writejournal(self, desc):
2761 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2761 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2762
2762
2763 def recover(self):
2763 def recover(self):
2764 with self.lock():
2764 with self.lock():
2765 if self.svfs.exists(b"journal"):
2765 if self.svfs.exists(b"journal"):
2766 self.ui.status(_(b"rolling back interrupted transaction\n"))
2766 self.ui.status(_(b"rolling back interrupted transaction\n"))
2767 vfsmap = self.vfs_map
2767 vfsmap = self.vfs_map
2768 transaction.rollback(
2768 transaction.rollback(
2769 self.svfs,
2769 self.svfs,
2770 vfsmap,
2770 vfsmap,
2771 b"journal",
2771 b"journal",
2772 self.ui.warn,
2772 self.ui.warn,
2773 checkambigfiles=_cachedfiles,
2773 checkambigfiles=_cachedfiles,
2774 )
2774 )
2775 self.invalidate()
2775 self.invalidate()
2776 return True
2776 return True
2777 else:
2777 else:
2778 self.ui.warn(_(b"no interrupted transaction available\n"))
2778 self.ui.warn(_(b"no interrupted transaction available\n"))
2779 return False
2779 return False
2780
2780
2781 def rollback(self, dryrun=False, force=False):
2781 def rollback(self, dryrun=False, force=False):
2782 wlock = lock = None
2782 wlock = lock = None
2783 try:
2783 try:
2784 wlock = self.wlock()
2784 wlock = self.wlock()
2785 lock = self.lock()
2785 lock = self.lock()
2786 if self.svfs.exists(b"undo"):
2786 if self.svfs.exists(b"undo"):
2787 return self._rollback(dryrun, force)
2787 return self._rollback(dryrun, force)
2788 else:
2788 else:
2789 self.ui.warn(_(b"no rollback information available\n"))
2789 self.ui.warn(_(b"no rollback information available\n"))
2790 return 1
2790 return 1
2791 finally:
2791 finally:
2792 release(lock, wlock)
2792 release(lock, wlock)
2793
2793
2794 @unfilteredmethod # Until we get smarter cache management
2794 @unfilteredmethod # Until we get smarter cache management
2795 def _rollback(self, dryrun, force):
2795 def _rollback(self, dryrun, force):
2796 ui = self.ui
2796 ui = self.ui
2797
2797
2798 parents = self.dirstate.parents()
2798 parents = self.dirstate.parents()
2799 try:
2799 try:
2800 args = self.vfs.read(b'undo.desc').splitlines()
2800 args = self.vfs.read(b'undo.desc').splitlines()
2801 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2801 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2802 if len(args) >= 3:
2802 if len(args) >= 3:
2803 detail = args[2]
2803 detail = args[2]
2804 oldtip = oldlen - 1
2804 oldtip = oldlen - 1
2805
2805
2806 if detail and ui.verbose:
2806 if detail and ui.verbose:
2807 msg = _(
2807 msg = _(
2808 b'repository tip rolled back to revision %d'
2808 b'repository tip rolled back to revision %d'
2809 b' (undo %s: %s)\n'
2809 b' (undo %s: %s)\n'
2810 ) % (oldtip, desc, detail)
2810 ) % (oldtip, desc, detail)
2811 else:
2811 else:
2812 msg = _(
2812 msg = _(
2813 b'repository tip rolled back to revision %d (undo %s)\n'
2813 b'repository tip rolled back to revision %d (undo %s)\n'
2814 ) % (oldtip, desc)
2814 ) % (oldtip, desc)
2815 parentgone = any(self[p].rev() > oldtip for p in parents)
2815 parentgone = any(self[p].rev() > oldtip for p in parents)
2816 except IOError:
2816 except IOError:
2817 msg = _(b'rolling back unknown transaction\n')
2817 msg = _(b'rolling back unknown transaction\n')
2818 desc = None
2818 desc = None
2819 parentgone = True
2819 parentgone = True
2820
2820
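# [editor's note] Illustrative sketch (hypothetical helper) of how the undo.desc
# content read above is interpreted: the first line is the old repository length,
# the second the transaction description, and an optional third a detail string.
def parse_undo_desc(data):
    args = data.splitlines()
    oldlen, desc = int(args[0]), args[1]
    detail = args[2] if len(args) >= 3 else None
    return oldlen, desc, detail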
2821 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2821 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2822 raise error.Abort(
2822 raise error.Abort(
2823 _(
2823 _(
2824 b'rollback of last commit while not checked out '
2824 b'rollback of last commit while not checked out '
2825 b'may lose data'
2825 b'may lose data'
2826 ),
2826 ),
2827 hint=_(b'use -f to force'),
2827 hint=_(b'use -f to force'),
2828 )
2828 )
2829
2829
2830 ui.status(msg)
2830 ui.status(msg)
2831 if dryrun:
2831 if dryrun:
2832 return 0
2832 return 0
2833
2833
2834 self.destroying()
2834 self.destroying()
2835 vfsmap = self.vfs_map
2835 vfsmap = self.vfs_map
2836 skip_journal_pattern = None
2836 skip_journal_pattern = None
2837 if not parentgone:
2837 if not parentgone:
2838 skip_journal_pattern = RE_SKIP_DIRSTATE_ROLLBACK
2838 skip_journal_pattern = RE_SKIP_DIRSTATE_ROLLBACK
2839 transaction.rollback(
2839 transaction.rollback(
2840 self.svfs,
2840 self.svfs,
2841 vfsmap,
2841 vfsmap,
2842 b'undo',
2842 b'undo',
2843 ui.warn,
2843 ui.warn,
2844 checkambigfiles=_cachedfiles,
2844 checkambigfiles=_cachedfiles,
2845 skip_journal_pattern=skip_journal_pattern,
2845 skip_journal_pattern=skip_journal_pattern,
2846 )
2846 )
2847 self.invalidate()
2847 self.invalidate()
2848 self.dirstate.invalidate()
2848 self.dirstate.invalidate()
2849
2849
2850 if parentgone:
2850 if parentgone:
2851 # replace this with some explicit parent update in the future.
2851 # replace this with some explicit parent update in the future.
2852 has_node = self.changelog.index.has_node
2852 has_node = self.changelog.index.has_node
2853 if not all(has_node(p) for p in self.dirstate._pl):
2853 if not all(has_node(p) for p in self.dirstate._pl):
2854 # There was no dirstate to backup initially, we need to drop
2854 # There was no dirstate to backup initially, we need to drop
2855 # the existing one.
2855 # the existing one.
2856 with self.dirstate.changing_parents(self):
2856 with self.dirstate.changing_parents(self):
2857 self.dirstate.setparents(self.nullid)
2857 self.dirstate.setparents(self.nullid)
2858 self.dirstate.clear()
2858 self.dirstate.clear()
2859
2859
2860 parents = tuple([p.rev() for p in self[None].parents()])
2860 parents = tuple([p.rev() for p in self[None].parents()])
2861 if len(parents) > 1:
2861 if len(parents) > 1:
2862 ui.status(
2862 ui.status(
2863 _(
2863 _(
2864 b'working directory now based on '
2864 b'working directory now based on '
2865 b'revisions %d and %d\n'
2865 b'revisions %d and %d\n'
2866 )
2866 )
2867 % parents
2867 % parents
2868 )
2868 )
2869 else:
2869 else:
2870 ui.status(
2870 ui.status(
2871 _(b'working directory now based on revision %d\n') % parents
2871 _(b'working directory now based on revision %d\n') % parents
2872 )
2872 )
2873 mergestatemod.mergestate.clean(self)
2873 mergestatemod.mergestate.clean(self)
2874
2874
2875 # TODO: if we know which new heads may result from this rollback, pass
2875 # TODO: if we know which new heads may result from this rollback, pass
2876 # them to destroy(), which will prevent the branchhead cache from being
2876 # them to destroy(), which will prevent the branchhead cache from being
2877 # invalidated.
2877 # invalidated.
2878 self.destroyed()
2878 self.destroyed()
2879 return 0
2879 return 0
2880
2880
2881 def _buildcacheupdater(self, newtransaction):
2881 def _buildcacheupdater(self, newtransaction):
2882 """called during transaction to build the callback updating cache
2882 """called during transaction to build the callback updating cache
2883
2883
2884 Lives on the repository to help extensions that might want to augment
2884 Lives on the repository to help extensions that might want to augment
2885 this logic. For this purpose, the created transaction is passed to the
2885 this logic. For this purpose, the created transaction is passed to the
2886 method.
2886 method.
2887 """
2887 """
2888 # we must avoid cyclic reference between repo and transaction.
2888 # we must avoid cyclic reference between repo and transaction.
2889 reporef = weakref.ref(self)
2889 reporef = weakref.ref(self)
2890
2890
2891 def updater(tr):
2891 def updater(tr):
2892 repo = reporef()
2892 repo = reporef()
2893 assert repo is not None # help pytype
2893 assert repo is not None # help pytype
2894 repo.updatecaches(tr)
2894 repo.updatecaches(tr)
2895
2895
2896 return updater
2896 return updater
2897
2897
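# [editor's note] Minimal sketch of the cycle-avoidance pattern used above: the
# returned callback closes over a weak reference to its owner rather than the owner
# itself, so a transaction holding the callback never keeps the repository alive.
import weakref

def build_cache_updater(owner):
    ref = weakref.ref(owner)

    def updater(tr):
        obj = ref()
        if obj is not None:  # the owner may already have been garbage collected
            obj.updatecaches(tr)

    return updater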
2898 @unfilteredmethod
2898 @unfilteredmethod
2899 def updatecaches(self, tr=None, full=False, caches=None):
2899 def updatecaches(self, tr=None, full=False, caches=None):
2900 """warm appropriate caches
2900 """warm appropriate caches
2901
2901
2902 If this function is called after a transaction closed, the transaction
2902 If this function is called after a transaction closed, the transaction
2903 will be available in the 'tr' argument. This can be used to selectively
2903 will be available in the 'tr' argument. This can be used to selectively
2904 update caches relevant to the changes in that transaction.
2904 update caches relevant to the changes in that transaction.
2905
2905
2906 If 'full' is set, make sure all caches the function knows about have
2906 If 'full' is set, make sure all caches the function knows about have
2907 up-to-date data. Even the ones usually loaded more lazily.
2907 up-to-date data. Even the ones usually loaded more lazily.
2908
2908
2909 The `full` argument can take a special "post-clone" value. In this case
2909 The `full` argument can take a special "post-clone" value. In this case
2910 the cache warming is done after a clone and some of the slower caches might
2910 the cache warming is done after a clone and some of the slower caches might
2911 be skipped, namely the `.fnodetags` one. This argument is 5.8 specific
2911 be skipped, namely the `.fnodetags` one. This argument is 5.8 specific
2912 as we plan for a cleaner way to deal with this for 5.9.
2912 as we plan for a cleaner way to deal with this for 5.9.
2913 """
2913 """
2914 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2914 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2915 # During strip, many caches are invalid but
2915 # During strip, many caches are invalid but
2916 # a later call to `destroyed` will refresh them.
2916 # a later call to `destroyed` will refresh them.
2917 return
2917 return
2918
2918
2919 unfi = self.unfiltered()
2919 unfi = self.unfiltered()
2920
2920
2921 if caches is None:
2921 if caches is None:
2922 caches = repository.CACHES_DEFAULT
2922 caches = repository.CACHES_DEFAULT
2923
2923
2924 if repository.CACHE_BRANCHMAP_SERVED in caches:
2924 if repository.CACHE_BRANCHMAP_SERVED in caches:
2925 if tr is None or tr.changes[b'origrepolen'] < len(self):
2925 if tr is None or tr.changes[b'origrepolen'] < len(self):
2926 # accessing the 'served' branchmap should refresh all the others,
2926 # accessing the 'served' branchmap should refresh all the others,
2927 self.ui.debug(b'updating the branch cache\n')
2927 self.ui.debug(b'updating the branch cache\n')
2928 self.filtered(b'served').branchmap()
2928 self.filtered(b'served').branchmap()
2929 self.filtered(b'served.hidden').branchmap()
2929 self.filtered(b'served.hidden').branchmap()
2930
2930
2931 if repository.CACHE_CHANGELOG_CACHE in caches:
2931 if repository.CACHE_CHANGELOG_CACHE in caches:
2932 self.changelog.update_caches(transaction=tr)
2932 self.changelog.update_caches(transaction=tr)
2933
2933
2934 if repository.CACHE_MANIFESTLOG_CACHE in caches:
2934 if repository.CACHE_MANIFESTLOG_CACHE in caches:
2935 self.manifestlog.update_caches(transaction=tr)
2935 self.manifestlog.update_caches(transaction=tr)
2936 for entry in self.store.walk():
2936 for entry in self.store.walk():
2937 if not entry.is_revlog:
2937 if not entry.is_revlog:
2938 continue
2938 continue
2939 if not entry.is_manifestlog:
2939 if not entry.is_manifestlog:
2940 continue
2940 continue
2941 manifestrevlog = entry.get_revlog_instance(self).get_revlog()
2941 manifestrevlog = entry.get_revlog_instance(self).get_revlog()
2942 if manifestrevlog is not None:
2942 if manifestrevlog is not None:
2943 manifestrevlog.update_caches(transaction=tr)
2943 manifestrevlog.update_caches(transaction=tr)
2944
2944
2945 if repository.CACHE_REV_BRANCH in caches:
2945 if repository.CACHE_REV_BRANCH in caches:
2946 rbc = unfi.revbranchcache()
2946 rbc = unfi.revbranchcache()
2947 for r in unfi.changelog:
2947 for r in unfi.changelog:
2948 rbc.branchinfo(r)
2948 rbc.branchinfo(r)
2949 rbc.write()
2949 rbc.write()
2950
2950
2951 if repository.CACHE_FULL_MANIFEST in caches:
2951 if repository.CACHE_FULL_MANIFEST in caches:
2952 # ensure the working copy parents are in the manifestfulltextcache
2952 # ensure the working copy parents are in the manifestfulltextcache
2953 for ctx in self[b'.'].parents():
2953 for ctx in self[b'.'].parents():
2954 ctx.manifest() # accessing the manifest is enough
2954 ctx.manifest() # accessing the manifest is enough
2955
2955
2956 if repository.CACHE_FILE_NODE_TAGS in caches:
2956 if repository.CACHE_FILE_NODE_TAGS in caches:
2957 # accessing fnode cache warms the cache
2957 # accessing fnode cache warms the cache
2958 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2958 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2959
2959
2960 if repository.CACHE_TAGS_DEFAULT in caches:
2960 if repository.CACHE_TAGS_DEFAULT in caches:
2961 # accessing tags warms the cache
2961 # accessing tags warms the cache
2962 self.tags()
2962 self.tags()
2963 if repository.CACHE_TAGS_SERVED in caches:
2963 if repository.CACHE_TAGS_SERVED in caches:
2964 self.filtered(b'served').tags()
2964 self.filtered(b'served').tags()
2965
2965
2966 if repository.CACHE_BRANCHMAP_ALL in caches:
2966 if repository.CACHE_BRANCHMAP_ALL in caches:
2967 # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
2967 # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
2968 # so we're forcing a write to cause these caches to be warmed up
2968 # so we're forcing a write to cause these caches to be warmed up
2969 # even if they haven't explicitly been requested yet (if they've
2969 # even if they haven't explicitly been requested yet (if they've
2970 # never been used by hg, they won't ever have been written, even if
2970 # never been used by hg, they won't ever have been written, even if
2971 # they're a subset of another kind of cache that *has* been used).
2971 # they're a subset of another kind of cache that *has* been used).
2972 for filt in repoview.filtertable.keys():
2972 for filt in repoview.filtertable.keys():
2973 filtered = self.filtered(filt)
2973 filtered = self.filtered(filt)
2974 self._branchcaches.update_disk(filtered)
2974 self._branchcaches.update_disk(filtered)
2975
2975
2976 # flush all possibly delayed writes.
2976 # flush all possibly delayed writes.
2977 self._branchcaches.write_delayed(self)
2977 self._branchcaches.write_dirty(self)
2978
2978
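# [editor's note] Usage sketch for updatecaches() above (assumptions: `repo` is a
# localrepository instance and the import path for the `repository` interfaces
# module is as shown): warm only the tag-related caches after an operation.
from mercurial.interfaces import repository  # assumed import path

def warm_tag_caches(repo):
    repo.updatecaches(
        caches={
            repository.CACHE_TAGS_DEFAULT,
            repository.CACHE_TAGS_SERVED,
        }
    )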
2979 def invalidatecaches(self):
2979 def invalidatecaches(self):
2980 if '_tagscache' in vars(self):
2980 if '_tagscache' in vars(self):
2981 # can't use delattr on proxy
2981 # can't use delattr on proxy
2982 del self.__dict__['_tagscache']
2982 del self.__dict__['_tagscache']
2983
2983
2984 self._branchcaches.clear()
2984 self._branchcaches.clear()
2985 self.invalidatevolatilesets()
2985 self.invalidatevolatilesets()
2986 self._sparsesignaturecache.clear()
2986 self._sparsesignaturecache.clear()
2987
2987
2988 def invalidatevolatilesets(self):
2988 def invalidatevolatilesets(self):
2989 self.filteredrevcache.clear()
2989 self.filteredrevcache.clear()
2990 obsolete.clearobscaches(self)
2990 obsolete.clearobscaches(self)
2991 self._quick_access_changeid_invalidate()
2991 self._quick_access_changeid_invalidate()
2992
2992
2993 def invalidatedirstate(self):
2993 def invalidatedirstate(self):
2994 """Invalidates the dirstate, causing the next call to dirstate
2994 """Invalidates the dirstate, causing the next call to dirstate
2995 to check if it was modified since the last time it was read,
2995 to check if it was modified since the last time it was read,
2996 rereading it if it has.
2996 rereading it if it has.
2997
2997
2998 This is different to dirstate.invalidate() that it doesn't always
2998 This is different to dirstate.invalidate() that it doesn't always
2999 rereads the dirstate. Use dirstate.invalidate() if you want to
2999 rereads the dirstate. Use dirstate.invalidate() if you want to
3000 explicitly read the dirstate again (i.e. restoring it to a previous
3000 explicitly read the dirstate again (i.e. restoring it to a previous
3001 known good state)."""
3001 known good state)."""
3002 unfi = self.unfiltered()
3002 unfi = self.unfiltered()
3003 if 'dirstate' in unfi.__dict__:
3003 if 'dirstate' in unfi.__dict__:
3004 assert not self.dirstate.is_changing_any
3004 assert not self.dirstate.is_changing_any
3005 del unfi.__dict__['dirstate']
3005 del unfi.__dict__['dirstate']
3006
3006
3007 def invalidate(self, clearfilecache=False):
3007 def invalidate(self, clearfilecache=False):
3008 """Invalidates both store and non-store parts other than dirstate
3008 """Invalidates both store and non-store parts other than dirstate
3009
3009
3010 If a transaction is running, invalidation of store is omitted,
3010 If a transaction is running, invalidation of store is omitted,
3011 because discarding in-memory changes might cause inconsistency
3011 because discarding in-memory changes might cause inconsistency
3012 (e.g. incomplete fncache causes unintentional failure, but
3012 (e.g. incomplete fncache causes unintentional failure, but
3013 redundant one doesn't).
3013 redundant one doesn't).
3014 """
3014 """
3015 unfiltered = self.unfiltered() # all file caches are stored unfiltered
3015 unfiltered = self.unfiltered() # all file caches are stored unfiltered
3016 for k in list(self._filecache.keys()):
3016 for k in list(self._filecache.keys()):
3017 if (
3017 if (
3018 k == b'changelog'
3018 k == b'changelog'
3019 and self.currenttransaction()
3019 and self.currenttransaction()
3020 and self.changelog.is_delaying
3020 and self.changelog.is_delaying
3021 ):
3021 ):
3022 # The changelog object may store unwritten revisions. We don't
3022 # The changelog object may store unwritten revisions. We don't
3023 # want to lose them.
3023 # want to lose them.
3024 # TODO: Solve the problem instead of working around it.
3024 # TODO: Solve the problem instead of working around it.
3025 continue
3025 continue
3026
3026
3027 if clearfilecache:
3027 if clearfilecache:
3028 del self._filecache[k]
3028 del self._filecache[k]
3029 try:
3029 try:
3030 # XXX ideally, the key would be a unicode string to match the
3030 # XXX ideally, the key would be a unicode string to match the
3031 # fact it refers to an attribute name. However changing this was
3031 # fact it refers to an attribute name. However changing this was
3032 # a bit of scope creep compared to the series cleaning up
3032 # a bit of scope creep compared to the series cleaning up
3033 # del/set/getattr so we kept things simple here.
3033 # del/set/getattr so we kept things simple here.
3034 delattr(unfiltered, pycompat.sysstr(k))
3034 delattr(unfiltered, pycompat.sysstr(k))
3035 except AttributeError:
3035 except AttributeError:
3036 pass
3036 pass
3037 self.invalidatecaches()
3037 self.invalidatecaches()
3038 if not self.currenttransaction():
3038 if not self.currenttransaction():
3039 # TODO: Changing contents of store outside transaction
3039 # TODO: Changing contents of store outside transaction
3040 # causes inconsistency. We should make in-memory store
3040 # causes inconsistency. We should make in-memory store
3041 # changes detectable, and abort if changed.
3041 # changes detectable, and abort if changed.
3042 self.store.invalidatecaches()
3042 self.store.invalidatecaches()
3043
3043
3044 def invalidateall(self):
3044 def invalidateall(self):
3045 """Fully invalidates both store and non-store parts, causing the
3045 """Fully invalidates both store and non-store parts, causing the
3046 subsequent operation to reread any outside changes."""
3046 subsequent operation to reread any outside changes."""
3047 # extensions should hook this to invalidate their caches
3047 # extensions should hook this to invalidate their caches
3048 self.invalidate()
3048 self.invalidate()
3049 self.invalidatedirstate()
3049 self.invalidatedirstate()
3050
3050
3051 @unfilteredmethod
3051 @unfilteredmethod
3052 def _refreshfilecachestats(self, tr):
3052 def _refreshfilecachestats(self, tr):
3053 """Reload stats of cached files so that they are flagged as valid"""
3053 """Reload stats of cached files so that they are flagged as valid"""
3054 for k, ce in self._filecache.items():
3054 for k, ce in self._filecache.items():
3055 k = pycompat.sysstr(k)
3055 k = pycompat.sysstr(k)
3056 if k == 'dirstate' or k not in self.__dict__:
3056 if k == 'dirstate' or k not in self.__dict__:
3057 continue
3057 continue
3058 ce.refresh()
3058 ce.refresh()
3059
3059
3060 def _lock(
3060 def _lock(
3061 self,
3061 self,
3062 vfs,
3062 vfs,
3063 lockname,
3063 lockname,
3064 wait,
3064 wait,
3065 releasefn,
3065 releasefn,
3066 acquirefn,
3066 acquirefn,
3067 desc,
3067 desc,
3068 ):
3068 ):
3069 timeout = 0
3069 timeout = 0
3070 warntimeout = 0
3070 warntimeout = 0
3071 if wait:
3071 if wait:
3072 timeout = self.ui.configint(b"ui", b"timeout")
3072 timeout = self.ui.configint(b"ui", b"timeout")
3073 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
3073 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
3074 # internal config: ui.signal-safe-lock
3074 # internal config: ui.signal-safe-lock
3075 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
3075 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
3076
3076
3077 l = lockmod.trylock(
3077 l = lockmod.trylock(
3078 self.ui,
3078 self.ui,
3079 vfs,
3079 vfs,
3080 lockname,
3080 lockname,
3081 timeout,
3081 timeout,
3082 warntimeout,
3082 warntimeout,
3083 releasefn=releasefn,
3083 releasefn=releasefn,
3084 acquirefn=acquirefn,
3084 acquirefn=acquirefn,
3085 desc=desc,
3085 desc=desc,
3086 signalsafe=signalsafe,
3086 signalsafe=signalsafe,
3087 )
3087 )
3088 return l
3088 return l
3089
3089
3090 def _afterlock(self, callback):
3090 def _afterlock(self, callback):
3091 """add a callback to be run when the repository is fully unlocked
3091 """add a callback to be run when the repository is fully unlocked
3092
3092
3093 The callback will be executed when the outermost lock is released
3093 The callback will be executed when the outermost lock is released
3094 (with wlock being higher level than 'lock')."""
3094 (with wlock being higher level than 'lock')."""
3095 for ref in (self._wlockref, self._lockref):
3095 for ref in (self._wlockref, self._lockref):
3096 l = ref and ref()
3096 l = ref and ref()
3097 if l and l.held:
3097 if l and l.held:
3098 l.postrelease.append(callback)
3098 l.postrelease.append(callback)
3099 break
3099 break
3100 else: # no lock has been found.
3100 else: # no lock has been found.
3101 callback(True)
3101 callback(True)
3102
3102
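# [editor's note] Minimal sketch (assumed names) of the "run after the outermost
# lock is released" behaviour documented in _afterlock() above: the callback queues
# on whichever lock is currently held, and runs immediately when nothing is held.
class FakeLock:
    def __init__(self):
        self.held = 1
        self.postrelease = []

    def release(self):
        self.held -= 1
        if not self.held:
            for callback in self.postrelease:
                callback(True)

def afterlock(locks, callback):
    for l in locks:  # e.g. (wlock, lock), outermost first
        if l is not None and l.held:
            l.postrelease.append(callback)
            return
    callback(True)  # no lock held: run right away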
3103 def lock(self, wait=True):
3103 def lock(self, wait=True):
3104 """Lock the repository store (.hg/store) and return a weak reference
3104 """Lock the repository store (.hg/store) and return a weak reference
3105 to the lock. Use this before modifying the store (e.g. committing or
3105 to the lock. Use this before modifying the store (e.g. committing or
3106 stripping). If you are opening a transaction, get a lock as well.
3106 stripping). If you are opening a transaction, get a lock as well.
3107
3107
3108 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3108 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3109 'wlock' first to avoid a dead-lock hazard."""
3109 'wlock' first to avoid a dead-lock hazard."""
3110 l = self._currentlock(self._lockref)
3110 l = self._currentlock(self._lockref)
3111 if l is not None:
3111 if l is not None:
3112 l.lock()
3112 l.lock()
3113 return l
3113 return l
3114
3114
3115 l = self._lock(
3115 l = self._lock(
3116 vfs=self.svfs,
3116 vfs=self.svfs,
3117 lockname=b"lock",
3117 lockname=b"lock",
3118 wait=wait,
3118 wait=wait,
3119 releasefn=None,
3119 releasefn=None,
3120 acquirefn=self.invalidate,
3120 acquirefn=self.invalidate,
3121 desc=_(b'repository %s') % self.origroot,
3121 desc=_(b'repository %s') % self.origroot,
3122 )
3122 )
3123 self._lockref = weakref.ref(l)
3123 self._lockref = weakref.ref(l)
3124 return l
3124 return l
3125
3125
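# [editor's note] Usage sketch of the ordering rule spelled out in the docstrings of
# lock() and wlock() (repo stands for any localrepository instance): always take the
# wlock before the store lock, exactly as commit() does further down with
# `with self.wlock(), self.lock():`.
def locked_operation(repo):
    with repo.wlock():      # working-copy lock first ...
        with repo.lock():   # ... then the store lock
            pass            # modify store and working copy here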
3126 def wlock(self, wait=True):
3126 def wlock(self, wait=True):
3127 """Lock the non-store parts of the repository (everything under
3127 """Lock the non-store parts of the repository (everything under
3128 .hg except .hg/store) and return a weak reference to the lock.
3128 .hg except .hg/store) and return a weak reference to the lock.
3129
3129
3130 Use this before modifying files in .hg.
3130 Use this before modifying files in .hg.
3131
3131
3132 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3132 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3133 'wlock' first to avoid a dead-lock hazard."""
3133 'wlock' first to avoid a dead-lock hazard."""
3134 l = self._wlockref() if self._wlockref else None
3134 l = self._wlockref() if self._wlockref else None
3135 if l is not None and l.held:
3135 if l is not None and l.held:
3136 l.lock()
3136 l.lock()
3137 return l
3137 return l
3138
3138
3139 # We do not need to check for non-waiting lock acquisition. Such
3139 # We do not need to check for non-waiting lock acquisition. Such
3140 # acquisition would not cause dead-lock as it would just fail.
3140 # acquisition would not cause dead-lock as it would just fail.
3141 if wait and (
3141 if wait and (
3142 self.ui.configbool(b'devel', b'all-warnings')
3142 self.ui.configbool(b'devel', b'all-warnings')
3143 or self.ui.configbool(b'devel', b'check-locks')
3143 or self.ui.configbool(b'devel', b'check-locks')
3144 ):
3144 ):
3145 if self._currentlock(self._lockref) is not None:
3145 if self._currentlock(self._lockref) is not None:
3146 self.ui.develwarn(b'"wlock" acquired after "lock"')
3146 self.ui.develwarn(b'"wlock" acquired after "lock"')
3147
3147
3148 def unlock():
3148 def unlock():
3149 if self.dirstate.is_changing_any:
3149 if self.dirstate.is_changing_any:
3150 msg = b"wlock release in the middle of a changing parents"
3150 msg = b"wlock release in the middle of a changing parents"
3151 self.ui.develwarn(msg)
3151 self.ui.develwarn(msg)
3152 self.dirstate.invalidate()
3152 self.dirstate.invalidate()
3153 else:
3153 else:
3154 if self.dirstate._dirty:
3154 if self.dirstate._dirty:
3155 msg = b"dirty dirstate on wlock release"
3155 msg = b"dirty dirstate on wlock release"
3156 self.ui.develwarn(msg)
3156 self.ui.develwarn(msg)
3157 self.dirstate.write(None)
3157 self.dirstate.write(None)
3158
3158
3159 unfi = self.unfiltered()
3159 unfi = self.unfiltered()
3160 if 'dirstate' in unfi.__dict__:
3160 if 'dirstate' in unfi.__dict__:
3161 del unfi.__dict__['dirstate']
3161 del unfi.__dict__['dirstate']
3162
3162
3163 l = self._lock(
3163 l = self._lock(
3164 self.vfs,
3164 self.vfs,
3165 b"wlock",
3165 b"wlock",
3166 wait,
3166 wait,
3167 unlock,
3167 unlock,
3168 self.invalidatedirstate,
3168 self.invalidatedirstate,
3169 _(b'working directory of %s') % self.origroot,
3169 _(b'working directory of %s') % self.origroot,
3170 )
3170 )
3171 self._wlockref = weakref.ref(l)
3171 self._wlockref = weakref.ref(l)
3172 return l
3172 return l
3173
3173
3174 def _currentlock(self, lockref):
3174 def _currentlock(self, lockref):
3175 """Returns the lock if it's held, or None if it's not."""
3175 """Returns the lock if it's held, or None if it's not."""
3176 if lockref is None:
3176 if lockref is None:
3177 return None
3177 return None
3178 l = lockref()
3178 l = lockref()
3179 if l is None or not l.held:
3179 if l is None or not l.held:
3180 return None
3180 return None
3181 return l
3181 return l
3182
3182
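# [editor's note] Illustrative sketch of the dereferencing done by _currentlock()
# above: a lock reachable only through a dead weakref, or one that is no longer
# held, is reported as None.
import weakref

def current_lock(lockref):
    if lockref is None:
        return None
    l = lockref()  # None if the lock object was garbage collected
    if l is None or not l.held:
        return None
    return l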
3183 def currentwlock(self):
3183 def currentwlock(self):
3184 """Returns the wlock if it's held, or None if it's not."""
3184 """Returns the wlock if it's held, or None if it's not."""
3185 return self._currentlock(self._wlockref)
3185 return self._currentlock(self._wlockref)
3186
3186
3187 def currentlock(self):
3187 def currentlock(self):
3188 """Returns the lock if it's held, or None if it's not."""
3188 """Returns the lock if it's held, or None if it's not."""
3189 return self._currentlock(self._lockref)
3189 return self._currentlock(self._lockref)
3190
3190
3191 def checkcommitpatterns(self, wctx, match, status, fail):
3191 def checkcommitpatterns(self, wctx, match, status, fail):
3192 """check for commit arguments that aren't committable"""
3192 """check for commit arguments that aren't committable"""
3193 if match.isexact() or match.prefix():
3193 if match.isexact() or match.prefix():
3194 matched = set(status.modified + status.added + status.removed)
3194 matched = set(status.modified + status.added + status.removed)
3195
3195
3196 for f in match.files():
3196 for f in match.files():
3197 f = self.dirstate.normalize(f)
3197 f = self.dirstate.normalize(f)
3198 if f == b'.' or f in matched or f in wctx.substate:
3198 if f == b'.' or f in matched or f in wctx.substate:
3199 continue
3199 continue
3200 if f in status.deleted:
3200 if f in status.deleted:
3201 fail(f, _(b'file not found!'))
3201 fail(f, _(b'file not found!'))
3202 # Is it a directory that exists or used to exist?
3202 # Is it a directory that exists or used to exist?
3203 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
3203 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
3204 d = f + b'/'
3204 d = f + b'/'
3205 for mf in matched:
3205 for mf in matched:
3206 if mf.startswith(d):
3206 if mf.startswith(d):
3207 break
3207 break
3208 else:
3208 else:
3209 fail(f, _(b"no match under directory!"))
3209 fail(f, _(b"no match under directory!"))
3210 elif f not in self.dirstate:
3210 elif f not in self.dirstate:
3211 fail(f, _(b"file not tracked!"))
3211 fail(f, _(b"file not tracked!"))
3212
3212
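# [editor's note] Sketch (hypothetical helper) of the directory test in
# checkcommitpatterns() above: an explicitly listed path that is (or was) a
# directory passes only if some matched file lives under it.
def has_match_under(path, matched):
    d = path + b'/'
    return any(mf.startswith(d) for mf in matched)

# e.g. has_match_under(b'src', {b'src/main.py', b'README'}) -> True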
3213 @unfilteredmethod
3213 @unfilteredmethod
3214 def commit(
3214 def commit(
3215 self,
3215 self,
3216 text=b"",
3216 text=b"",
3217 user=None,
3217 user=None,
3218 date=None,
3218 date=None,
3219 match=None,
3219 match=None,
3220 force=False,
3220 force=False,
3221 editor=None,
3221 editor=None,
3222 extra=None,
3222 extra=None,
3223 ):
3223 ):
3224 """Add a new revision to current repository.
3224 """Add a new revision to current repository.
3225
3225
3226 Revision information is gathered from the working directory,
3226 Revision information is gathered from the working directory,
3227 match can be used to filter the committed files. If editor is
3227 match can be used to filter the committed files. If editor is
3228 supplied, it is called to get a commit message.
3228 supplied, it is called to get a commit message.
3229 """
3229 """
3230 if extra is None:
3230 if extra is None:
3231 extra = {}
3231 extra = {}
3232
3232
3233 def fail(f, msg):
3233 def fail(f, msg):
3234 raise error.InputError(b'%s: %s' % (f, msg))
3234 raise error.InputError(b'%s: %s' % (f, msg))
3235
3235
3236 if not match:
3236 if not match:
3237 match = matchmod.always()
3237 match = matchmod.always()
3238
3238
3239 if not force:
3239 if not force:
3240 match.bad = fail
3240 match.bad = fail
3241
3241
3242 # lock() for recent changelog (see issue4368)
3242 # lock() for recent changelog (see issue4368)
3243 with self.wlock(), self.lock():
3243 with self.wlock(), self.lock():
3244 wctx = self[None]
3244 wctx = self[None]
3245 merge = len(wctx.parents()) > 1
3245 merge = len(wctx.parents()) > 1
3246
3246
3247 if not force and merge and not match.always():
3247 if not force and merge and not match.always():
3248 raise error.Abort(
3248 raise error.Abort(
3249 _(
3249 _(
3250 b'cannot partially commit a merge '
3250 b'cannot partially commit a merge '
3251 b'(do not specify files or patterns)'
3251 b'(do not specify files or patterns)'
3252 )
3252 )
3253 )
3253 )
3254
3254
3255 status = self.status(match=match, clean=force)
3255 status = self.status(match=match, clean=force)
3256 if force:
3256 if force:
3257 status.modified.extend(
3257 status.modified.extend(
3258 status.clean
3258 status.clean
3259 ) # mq may commit clean files
3259 ) # mq may commit clean files
3260
3260
3261 # check subrepos
3261 # check subrepos
3262 subs, commitsubs, newstate = subrepoutil.precommit(
3262 subs, commitsubs, newstate = subrepoutil.precommit(
3263 self.ui, wctx, status, match, force=force
3263 self.ui, wctx, status, match, force=force
3264 )
3264 )
3265
3265
3266 # make sure all explicit patterns are matched
3266 # make sure all explicit patterns are matched
3267 if not force:
3267 if not force:
3268 self.checkcommitpatterns(wctx, match, status, fail)
3268 self.checkcommitpatterns(wctx, match, status, fail)
3269
3269
3270 cctx = context.workingcommitctx(
3270 cctx = context.workingcommitctx(
3271 self, status, text, user, date, extra
3271 self, status, text, user, date, extra
3272 )
3272 )
3273
3273
3274 ms = mergestatemod.mergestate.read(self)
3274 ms = mergestatemod.mergestate.read(self)
3275 mergeutil.checkunresolved(ms)
3275 mergeutil.checkunresolved(ms)
3276
3276
3277 # internal config: ui.allowemptycommit
3277 # internal config: ui.allowemptycommit
3278 if cctx.isempty() and not self.ui.configbool(
3278 if cctx.isempty() and not self.ui.configbool(
3279 b'ui', b'allowemptycommit'
3279 b'ui', b'allowemptycommit'
3280 ):
3280 ):
3281 self.ui.debug(b'nothing to commit, clearing merge state\n')
3281 self.ui.debug(b'nothing to commit, clearing merge state\n')
3282 ms.reset()
3282 ms.reset()
3283 return None
3283 return None
3284
3284
3285 if merge and cctx.deleted():
3285 if merge and cctx.deleted():
3286 raise error.Abort(_(b"cannot commit merge with missing files"))
3286 raise error.Abort(_(b"cannot commit merge with missing files"))
3287
3287
3288 if editor:
3288 if editor:
3289 cctx._text = editor(self, cctx, subs)
3289 cctx._text = editor(self, cctx, subs)
3290 edited = text != cctx._text
3290 edited = text != cctx._text
3291
3291
3292 # Save commit message in case this transaction gets rolled back
3292 # Save commit message in case this transaction gets rolled back
3293 # (e.g. by a pretxncommit hook). Leave the content alone on
3293 # (e.g. by a pretxncommit hook). Leave the content alone on
3294 # the assumption that the user will use the same editor again.
3294 # the assumption that the user will use the same editor again.
3295 msg_path = self.savecommitmessage(cctx._text)
3295 msg_path = self.savecommitmessage(cctx._text)
3296
3296
3297 # commit subs and write new state
3297 # commit subs and write new state
3298 if subs:
3298 if subs:
3299 uipathfn = scmutil.getuipathfn(self)
3299 uipathfn = scmutil.getuipathfn(self)
3300 for s in sorted(commitsubs):
3300 for s in sorted(commitsubs):
3301 sub = wctx.sub(s)
3301 sub = wctx.sub(s)
3302 self.ui.status(
3302 self.ui.status(
3303 _(b'committing subrepository %s\n')
3303 _(b'committing subrepository %s\n')
3304 % uipathfn(subrepoutil.subrelpath(sub))
3304 % uipathfn(subrepoutil.subrelpath(sub))
3305 )
3305 )
3306 sr = sub.commit(cctx._text, user, date)
3306 sr = sub.commit(cctx._text, user, date)
3307 newstate[s] = (newstate[s][0], sr)
3307 newstate[s] = (newstate[s][0], sr)
3308 subrepoutil.writestate(self, newstate)
3308 subrepoutil.writestate(self, newstate)
3309
3309
3310 p1, p2 = self.dirstate.parents()
3310 p1, p2 = self.dirstate.parents()
3311 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3311 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3312 try:
3312 try:
3313 self.hook(
3313 self.hook(
3314 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3314 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3315 )
3315 )
3316 with self.transaction(b'commit'):
3316 with self.transaction(b'commit'):
3317 ret = self.commitctx(cctx, True)
3317 ret = self.commitctx(cctx, True)
3318 # update bookmarks, dirstate and mergestate
3318 # update bookmarks, dirstate and mergestate
3319 bookmarks.update(self, [p1, p2], ret)
3319 bookmarks.update(self, [p1, p2], ret)
3320 cctx.markcommitted(ret)
3320 cctx.markcommitted(ret)
3321 ms.reset()
3321 ms.reset()
3322 except: # re-raises
3322 except: # re-raises
3323 if edited:
3323 if edited:
3324 self.ui.write(
3324 self.ui.write(
3325 _(b'note: commit message saved in %s\n') % msg_path
3325 _(b'note: commit message saved in %s\n') % msg_path
3326 )
3326 )
3327 self.ui.write(
3327 self.ui.write(
3328 _(
3328 _(
3329 b"note: use 'hg commit --logfile "
3329 b"note: use 'hg commit --logfile "
3330 b"%s --edit' to reuse it\n"
3330 b"%s --edit' to reuse it\n"
3331 )
3331 )
3332 % msg_path
3332 % msg_path
3333 )
3333 )
3334 raise
3334 raise
3335
3335
3336 def commithook(unused_success):
3336 def commithook(unused_success):
3337            # hack for commands that use a temporary commit (e.g. histedit): the
3337            # hack for commands that use a temporary commit (e.g. histedit): the
3338            # temporary commit may already have been stripped when the hook runs
3338            # temporary commit may already have been stripped when the hook runs
3339 if self.changelog.hasnode(ret):
3339 if self.changelog.hasnode(ret):
3340 self.hook(
3340 self.hook(
3341 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3341 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3342 )
3342 )
3343
3343
3344 self._afterlock(commithook)
3344 self._afterlock(commithook)
3345 return ret
3345 return ret
3346
3346
3347 @unfilteredmethod
3347 @unfilteredmethod
3348 def commitctx(self, ctx, error=False, origctx=None):
3348 def commitctx(self, ctx, error=False, origctx=None):
3349 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3349 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3350
3350
3351 @unfilteredmethod
3351 @unfilteredmethod
3352 def destroying(self):
3352 def destroying(self):
3353 """Inform the repository that nodes are about to be destroyed.
3353 """Inform the repository that nodes are about to be destroyed.
3354 Intended for use by strip and rollback, so there's a common
3354 Intended for use by strip and rollback, so there's a common
3355 place for anything that has to be done before destroying history.
3355 place for anything that has to be done before destroying history.
3356
3356
3357 This is mostly useful for saving state that is in memory and waiting
3357 This is mostly useful for saving state that is in memory and waiting
3358 to be flushed when the current lock is released. Because a call to
3358 to be flushed when the current lock is released. Because a call to
3359 destroyed is imminent, the repo will be invalidated causing those
3359 destroyed is imminent, the repo will be invalidated causing those
3360 changes to stay in memory (waiting for the next unlock), or vanish
3360 changes to stay in memory (waiting for the next unlock), or vanish
3361 completely.
3361 completely.
3362 """
3362 """
3363 # When using the same lock to commit and strip, the phasecache is left
3363 # When using the same lock to commit and strip, the phasecache is left
3364 # dirty after committing. Then when we strip, the repo is invalidated,
3364 # dirty after committing. Then when we strip, the repo is invalidated,
3365 # causing those changes to disappear.
3365 # causing those changes to disappear.
3366 if '_phasecache' in vars(self):
3366 if '_phasecache' in vars(self):
3367 self._phasecache.write(self)
3367 self._phasecache.write(self)
3368
3368
3369 @unfilteredmethod
3369 @unfilteredmethod
3370 def destroyed(self):
3370 def destroyed(self):
3371 """Inform the repository that nodes have been destroyed.
3371 """Inform the repository that nodes have been destroyed.
3372 Intended for use by strip and rollback, so there's a common
3372 Intended for use by strip and rollback, so there's a common
3373 place for anything that has to be done after destroying history.
3373 place for anything that has to be done after destroying history.
3374 """
3374 """
3375 # refresh all repository caches
3375 # refresh all repository caches
3376 self.updatecaches()
3376 self.updatecaches()
3377
3377
3378 # Ensure the persistent tag cache is updated. Doing it now
3378 # Ensure the persistent tag cache is updated. Doing it now
3379 # means that the tag cache only has to worry about destroyed
3379 # means that the tag cache only has to worry about destroyed
3380 # heads immediately after a strip/rollback. That in turn
3380 # heads immediately after a strip/rollback. That in turn
3381 # guarantees that "cachetip == currenttip" (comparing both rev
3381 # guarantees that "cachetip == currenttip" (comparing both rev
3382 # and node) always means no nodes have been added or destroyed.
3382 # and node) always means no nodes have been added or destroyed.
3383
3383
3384 # XXX this is suboptimal when qrefresh'ing: we strip the current
3384 # XXX this is suboptimal when qrefresh'ing: we strip the current
3385 # head, refresh the tag cache, then immediately add a new head.
3385 # head, refresh the tag cache, then immediately add a new head.
3386 # But I think doing it this way is necessary for the "instant
3386 # But I think doing it this way is necessary for the "instant
3387 # tag cache retrieval" case to work.
3387 # tag cache retrieval" case to work.
3388 self.invalidate()
3388 self.invalidate()
3389
3389
3390 def status(
3390 def status(
3391 self,
3391 self,
3392 node1=b'.',
3392 node1=b'.',
3393 node2=None,
3393 node2=None,
3394 match=None,
3394 match=None,
3395 ignored=False,
3395 ignored=False,
3396 clean=False,
3396 clean=False,
3397 unknown=False,
3397 unknown=False,
3398 listsubrepos=False,
3398 listsubrepos=False,
3399 ):
3399 ):
3400 '''a convenience method that calls node1.status(node2)'''
3400 '''a convenience method that calls node1.status(node2)'''
3401 return self[node1].status(
3401 return self[node1].status(
3402 node2, match, ignored, clean, unknown, listsubrepos
3402 node2, match, ignored, clean, unknown, listsubrepos
3403 )
3403 )
3404
3404
3405 def addpostdsstatus(self, ps):
3405 def addpostdsstatus(self, ps):
3406 """Add a callback to run within the wlock, at the point at which status
3406 """Add a callback to run within the wlock, at the point at which status
3407 fixups happen.
3407 fixups happen.
3408
3408
3409 On status completion, callback(wctx, status) will be called with the
3409 On status completion, callback(wctx, status) will be called with the
3410 wlock held, unless the dirstate has changed from underneath or the wlock
3410 wlock held, unless the dirstate has changed from underneath or the wlock
3411 couldn't be grabbed.
3411 couldn't be grabbed.
3412
3412
3413 Callbacks should not capture and use a cached copy of the dirstate --
3413 Callbacks should not capture and use a cached copy of the dirstate --
3414 it might change in the meanwhile. Instead, they should access the
3414 it might change in the meanwhile. Instead, they should access the
3415 dirstate via wctx.repo().dirstate.
3415 dirstate via wctx.repo().dirstate.
3416
3416
3417 This list is emptied out after each status run -- extensions should
3417 This list is emptied out after each status run -- extensions should
3418         make sure they add to this list each time dirstate.status is called.
3418         make sure they add to this list each time dirstate.status is called.
3419 Extensions should also make sure they don't call this for statuses
3419 Extensions should also make sure they don't call this for statuses
3420 that don't involve the dirstate.
3420 that don't involve the dirstate.
3421 """
3421 """
3422
3422
3423 # The list is located here for uniqueness reasons -- it is actually
3423 # The list is located here for uniqueness reasons -- it is actually
3424 # managed by the workingctx, but that isn't unique per-repo.
3424 # managed by the workingctx, but that isn't unique per-repo.
3425 self._postdsstatus.append(ps)
3425 self._postdsstatus.append(ps)
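# Illustrative sketch (not part of the original source): a hypothetical
# extension could register such a callback as follows, assuming `repo` is a
# local repository instance obtained elsewhere:

def _report_modified(wctx, status):
    # runs with the wlock held after status fixups; access the dirstate
    # through wctx.repo() as recommended above, not via a captured copy
    wctx.repo().ui.debug(b'%d files modified\n' % len(status.modified))

repo.addpostdsstatus(_report_modified)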
3426
3426
3427 def postdsstatus(self):
3427 def postdsstatus(self):
3428 """Used by workingctx to get the list of post-dirstate-status hooks."""
3428 """Used by workingctx to get the list of post-dirstate-status hooks."""
3429 return self._postdsstatus
3429 return self._postdsstatus
3430
3430
3431 def clearpostdsstatus(self):
3431 def clearpostdsstatus(self):
3432 """Used by workingctx to clear post-dirstate-status hooks."""
3432 """Used by workingctx to clear post-dirstate-status hooks."""
3433 del self._postdsstatus[:]
3433 del self._postdsstatus[:]
3434
3434
3435 def heads(self, start=None):
3435 def heads(self, start=None):
3436 if start is None:
3436 if start is None:
3437 cl = self.changelog
3437 cl = self.changelog
3438 headrevs = reversed(cl.headrevs())
3438 headrevs = reversed(cl.headrevs())
3439 return [cl.node(rev) for rev in headrevs]
3439 return [cl.node(rev) for rev in headrevs]
3440
3440
3441 heads = self.changelog.heads(start)
3441 heads = self.changelog.heads(start)
3442 # sort the output in rev descending order
3442 # sort the output in rev descending order
3443 return sorted(heads, key=self.changelog.rev, reverse=True)
3443 return sorted(heads, key=self.changelog.rev, reverse=True)
3444
3444
3445 def branchheads(self, branch=None, start=None, closed=False):
3445 def branchheads(self, branch=None, start=None, closed=False):
3446 """return a (possibly filtered) list of heads for the given branch
3446 """return a (possibly filtered) list of heads for the given branch
3447
3447
3448 Heads are returned in topological order, from newest to oldest.
3448 Heads are returned in topological order, from newest to oldest.
3449 If branch is None, use the dirstate branch.
3449 If branch is None, use the dirstate branch.
3450 If start is not None, return only heads reachable from start.
3450 If start is not None, return only heads reachable from start.
3451 If closed is True, return heads that are marked as closed as well.
3451 If closed is True, return heads that are marked as closed as well.
3452 """
3452 """
3453 if branch is None:
3453 if branch is None:
3454 branch = self[None].branch()
3454 branch = self[None].branch()
3455 branches = self.branchmap()
3455 branches = self.branchmap()
3456 if not branches.hasbranch(branch):
3456 if not branches.hasbranch(branch):
3457 return []
3457 return []
3458 # the cache returns heads ordered lowest to highest
3458 # the cache returns heads ordered lowest to highest
3459 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3459 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3460 if start is not None:
3460 if start is not None:
3461 # filter out the heads that cannot be reached from startrev
3461 # filter out the heads that cannot be reached from startrev
3462 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3462 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3463 bheads = [h for h in bheads if h in fbheads]
3463 bheads = [h for h in bheads if h in fbheads]
3464 return bheads
3464 return bheads
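# Illustrative sketch (not part of the original source), assuming `repo` is a
# local repository: print the open heads of the 'default' branch, newest
# first, as returned by the method above.
for node in repo.branchheads(b'default', closed=False):
    repo.ui.write(b'%s\n' % hex(node))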
3465
3465
3466 def branches(self, nodes):
3466 def branches(self, nodes):
3467 if not nodes:
3467 if not nodes:
3468 nodes = [self.changelog.tip()]
3468 nodes = [self.changelog.tip()]
3469 b = []
3469 b = []
3470 for n in nodes:
3470 for n in nodes:
3471 t = n
3471 t = n
3472 while True:
3472 while True:
3473 p = self.changelog.parents(n)
3473 p = self.changelog.parents(n)
3474 if p[1] != self.nullid or p[0] == self.nullid:
3474 if p[1] != self.nullid or p[0] == self.nullid:
3475 b.append((t, n, p[0], p[1]))
3475 b.append((t, n, p[0], p[1]))
3476 break
3476 break
3477 n = p[0]
3477 n = p[0]
3478 return b
3478 return b
3479
3479
3480 def between(self, pairs):
3480 def between(self, pairs):
3481 r = []
3481 r = []
3482
3482
3483 for top, bottom in pairs:
3483 for top, bottom in pairs:
3484 n, l, i = top, [], 0
3484 n, l, i = top, [], 0
3485 f = 1
3485 f = 1
3486
3486
3487 while n != bottom and n != self.nullid:
3487 while n != bottom and n != self.nullid:
3488 p = self.changelog.parents(n)[0]
3488 p = self.changelog.parents(n)[0]
3489 if i == f:
3489 if i == f:
3490 l.append(n)
3490 l.append(n)
3491 f = f * 2
3491 f = f * 2
3492 n = p
3492 n = p
3493 i += 1
3493 i += 1
3494
3494
3495 r.append(l)
3495 r.append(l)
3496
3496
3497 return r
3497 return r
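# Illustrative sketch (not part of the original source): for a purely linear
# history with revisions 0..9, sampling between the tip and the root collects
# the first-parent ancestors at distances 1, 2, 4 and 8 from `top`, i.e. the
# nodes of revisions 8, 7, 5 and 1 (assuming `repo` is such a repository):
top, bottom = repo[9].node(), repo[0].node()
sampled = repo.between([(top, bottom)])[0]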
3498
3498
3499 def checkpush(self, pushop):
3499 def checkpush(self, pushop):
3500 """Extensions can override this function if additional checks have
3500 """Extensions can override this function if additional checks have
3501 to be performed before pushing, or call it if they override push
3501 to be performed before pushing, or call it if they override push
3502 command.
3502 command.
3503 """
3503 """
3504
3504
3505 @unfilteredpropertycache
3505 @unfilteredpropertycache
3506 def prepushoutgoinghooks(self):
3506 def prepushoutgoinghooks(self):
3507         """Return a util.hooks object; its hooks are called with a pushop
3507         """Return a util.hooks object; its hooks are called with a pushop
3508         (providing repo, remote and outgoing) before pushing changesets.
3508         (providing repo, remote and outgoing) before pushing changesets.
3509 """
3509 """
3510 return util.hooks()
3510 return util.hooks()
3511
3511
3512 def pushkey(self, namespace, key, old, new):
3512 def pushkey(self, namespace, key, old, new):
3513 try:
3513 try:
3514 tr = self.currenttransaction()
3514 tr = self.currenttransaction()
3515 hookargs = {}
3515 hookargs = {}
3516 if tr is not None:
3516 if tr is not None:
3517 hookargs.update(tr.hookargs)
3517 hookargs.update(tr.hookargs)
3518 hookargs = pycompat.strkwargs(hookargs)
3518 hookargs = pycompat.strkwargs(hookargs)
3519 hookargs['namespace'] = namespace
3519 hookargs['namespace'] = namespace
3520 hookargs['key'] = key
3520 hookargs['key'] = key
3521 hookargs['old'] = old
3521 hookargs['old'] = old
3522 hookargs['new'] = new
3522 hookargs['new'] = new
3523 self.hook(b'prepushkey', throw=True, **hookargs)
3523 self.hook(b'prepushkey', throw=True, **hookargs)
3524 except error.HookAbort as exc:
3524 except error.HookAbort as exc:
3525 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3525 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3526 if exc.hint:
3526 if exc.hint:
3527 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3527 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3528 return False
3528 return False
3529 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3529 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3530 ret = pushkey.push(self, namespace, key, old, new)
3530 ret = pushkey.push(self, namespace, key, old, new)
3531
3531
3532 def runhook(unused_success):
3532 def runhook(unused_success):
3533 self.hook(
3533 self.hook(
3534 b'pushkey',
3534 b'pushkey',
3535 namespace=namespace,
3535 namespace=namespace,
3536 key=key,
3536 key=key,
3537 old=old,
3537 old=old,
3538 new=new,
3538 new=new,
3539 ret=ret,
3539 ret=ret,
3540 )
3540 )
3541
3541
3542 self._afterlock(runhook)
3542 self._afterlock(runhook)
3543 return ret
3543 return ret
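# Illustrative sketch (not part of the original source): bookmarks are one of
# the namespaces exposed through pushkey; assuming `repo` is a local
# repository and `new_node` the hex of an existing changeset, creating a
# bookmark this way runs the prepushkey/pushkey hooks shown above:
ok = repo.pushkey(b'bookmarks', b'my-bookmark', b'', new_node)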
3544
3544
3545 def listkeys(self, namespace):
3545 def listkeys(self, namespace):
3546 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3546 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3547 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3547 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3548 values = pushkey.list(self, namespace)
3548 values = pushkey.list(self, namespace)
3549 self.hook(b'listkeys', namespace=namespace, values=values)
3549 self.hook(b'listkeys', namespace=namespace, values=values)
3550 return values
3550 return values
3551
3551
3552 def debugwireargs(self, one, two, three=None, four=None, five=None):
3552 def debugwireargs(self, one, two, three=None, four=None, five=None):
3553 '''used to test argument passing over the wire'''
3553 '''used to test argument passing over the wire'''
3554 return b"%s %s %s %s %s" % (
3554 return b"%s %s %s %s %s" % (
3555 one,
3555 one,
3556 two,
3556 two,
3557 pycompat.bytestr(three),
3557 pycompat.bytestr(three),
3558 pycompat.bytestr(four),
3558 pycompat.bytestr(four),
3559 pycompat.bytestr(five),
3559 pycompat.bytestr(five),
3560 )
3560 )
3561
3561
3562 def savecommitmessage(self, text):
3562 def savecommitmessage(self, text):
3563 fp = self.vfs(b'last-message.txt', b'wb')
3563 fp = self.vfs(b'last-message.txt', b'wb')
3564 try:
3564 try:
3565 fp.write(text)
3565 fp.write(text)
3566 finally:
3566 finally:
3567 fp.close()
3567 fp.close()
3568 return self.pathto(fp.name[len(self.root) + 1 :])
3568 return self.pathto(fp.name[len(self.root) + 1 :])
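# Illustrative sketch (not part of the original source): the message is kept
# in .hg/last-message.txt and the returned value is the path to that file,
# relative to the current directory (assuming `repo` is a local repository):
msg_path = repo.savecommitmessage(b'WIP: draft commit message\n')
repo.ui.status(b'message saved in %s\n' % msg_path)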
3569
3569
3570 def register_wanted_sidedata(self, category):
3570 def register_wanted_sidedata(self, category):
3571 if repository.REPO_FEATURE_SIDE_DATA not in self.features:
3571 if repository.REPO_FEATURE_SIDE_DATA not in self.features:
3572 # Only revlogv2 repos can want sidedata.
3572 # Only revlogv2 repos can want sidedata.
3573 return
3573 return
3574 self._wanted_sidedata.add(pycompat.bytestr(category))
3574 self._wanted_sidedata.add(pycompat.bytestr(category))
3575
3575
3576 def register_sidedata_computer(
3576 def register_sidedata_computer(
3577 self, kind, category, keys, computer, flags, replace=False
3577 self, kind, category, keys, computer, flags, replace=False
3578 ):
3578 ):
3579 if kind not in revlogconst.ALL_KINDS:
3579 if kind not in revlogconst.ALL_KINDS:
3580 msg = _(b"unexpected revlog kind '%s'.")
3580 msg = _(b"unexpected revlog kind '%s'.")
3581 raise error.ProgrammingError(msg % kind)
3581 raise error.ProgrammingError(msg % kind)
3582 category = pycompat.bytestr(category)
3582 category = pycompat.bytestr(category)
3583 already_registered = category in self._sidedata_computers.get(kind, [])
3583 already_registered = category in self._sidedata_computers.get(kind, [])
3584 if already_registered and not replace:
3584 if already_registered and not replace:
3585 msg = _(
3585 msg = _(
3586 b"cannot register a sidedata computer twice for category '%s'."
3586 b"cannot register a sidedata computer twice for category '%s'."
3587 )
3587 )
3588 raise error.ProgrammingError(msg % category)
3588 raise error.ProgrammingError(msg % category)
3589 if replace and not already_registered:
3589 if replace and not already_registered:
3590 msg = _(
3590 msg = _(
3591 b"cannot replace a sidedata computer that isn't registered "
3591 b"cannot replace a sidedata computer that isn't registered "
3592 b"for category '%s'."
3592 b"for category '%s'."
3593 )
3593 )
3594 raise error.ProgrammingError(msg % category)
3594 raise error.ProgrammingError(msg % category)
3595 self._sidedata_computers.setdefault(kind, {})
3595 self._sidedata_computers.setdefault(kind, {})
3596 self._sidedata_computers[kind][category] = (keys, computer, flags)
3596 self._sidedata_computers[kind][category] = (keys, computer, flags)
3597
3597
3598
3598
3599 def undoname(fn: bytes) -> bytes:
3599 def undoname(fn: bytes) -> bytes:
3600 base, name = os.path.split(fn)
3600 base, name = os.path.split(fn)
3601 assert name.startswith(b'journal')
3601 assert name.startswith(b'journal')
3602 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3602 return os.path.join(base, name.replace(b'journal', b'undo', 1))
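# Illustrative sketch (not part of the original source): journal files map to
# their undo counterparts purely by name, for example:
assert undoname(b'store/journal') == b'store/undo'
assert undoname(b'journal.dirstate') == b'undo.dirstate'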
3603
3603
3604
3604
3605 def instance(ui, path: bytes, create, intents=None, createopts=None):
3605 def instance(ui, path: bytes, create, intents=None, createopts=None):
3606 # prevent cyclic import localrepo -> upgrade -> localrepo
3606 # prevent cyclic import localrepo -> upgrade -> localrepo
3607 from . import upgrade
3607 from . import upgrade
3608
3608
3609 localpath = urlutil.urllocalpath(path)
3609 localpath = urlutil.urllocalpath(path)
3610 if create:
3610 if create:
3611 createrepository(ui, localpath, createopts=createopts)
3611 createrepository(ui, localpath, createopts=createopts)
3612
3612
3613 def repo_maker():
3613 def repo_maker():
3614 return makelocalrepository(ui, localpath, intents=intents)
3614 return makelocalrepository(ui, localpath, intents=intents)
3615
3615
3616 repo = repo_maker()
3616 repo = repo_maker()
3617 repo = upgrade.may_auto_upgrade(repo, repo_maker)
3617 repo = upgrade.may_auto_upgrade(repo, repo_maker)
3618 return repo
3618 return repo
3619
3619
3620
3620
3621 def islocal(path: bytes) -> bool:
3621 def islocal(path: bytes) -> bool:
3622 return True
3622 return True
3623
3623
3624
3624
3625 def defaultcreateopts(ui, createopts=None):
3625 def defaultcreateopts(ui, createopts=None):
3626 """Populate the default creation options for a repository.
3626 """Populate the default creation options for a repository.
3627
3627
3628 A dictionary of explicitly requested creation options can be passed
3628 A dictionary of explicitly requested creation options can be passed
3629 in. Missing keys will be populated.
3629 in. Missing keys will be populated.
3630 """
3630 """
3631 createopts = dict(createopts or {})
3631 createopts = dict(createopts or {})
3632
3632
3633 if b'backend' not in createopts:
3633 if b'backend' not in createopts:
3634 # experimental config: storage.new-repo-backend
3634 # experimental config: storage.new-repo-backend
3635 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3635 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3636
3636
3637 return createopts
3637 return createopts
3638
3638
3639
3639
3640 def clone_requirements(ui, createopts, srcrepo):
3640 def clone_requirements(ui, createopts, srcrepo):
3641 """clone the requirements of a local repo for a local clone
3641 """clone the requirements of a local repo for a local clone
3642
3642
3643 The store requirements are unchanged while the working copy requirements
3643 The store requirements are unchanged while the working copy requirements
3644 depends on the configuration
3644 depends on the configuration
3645 """
3645 """
3646 target_requirements = set()
3646 target_requirements = set()
3647 if not srcrepo.requirements:
3647 if not srcrepo.requirements:
3648 # this is a legacy revlog "v0" repository, we cannot do anything fancy
3648 # this is a legacy revlog "v0" repository, we cannot do anything fancy
3649 # with it.
3649 # with it.
3650 return target_requirements
3650 return target_requirements
3651 createopts = defaultcreateopts(ui, createopts=createopts)
3651 createopts = defaultcreateopts(ui, createopts=createopts)
3652 for r in newreporequirements(ui, createopts):
3652 for r in newreporequirements(ui, createopts):
3653 if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
3653 if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
3654 target_requirements.add(r)
3654 target_requirements.add(r)
3655
3655
3656 for r in srcrepo.requirements:
3656 for r in srcrepo.requirements:
3657 if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
3657 if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
3658 target_requirements.add(r)
3658 target_requirements.add(r)
3659 return target_requirements
3659 return target_requirements
3660
3660
3661
3661
3662 def newreporequirements(ui, createopts):
3662 def newreporequirements(ui, createopts):
3663 """Determine the set of requirements for a new local repository.
3663 """Determine the set of requirements for a new local repository.
3664
3664
3665 Extensions can wrap this function to specify custom requirements for
3665 Extensions can wrap this function to specify custom requirements for
3666 new repositories.
3666 new repositories.
3667 """
3667 """
3668
3668
3669 if b'backend' not in createopts:
3669 if b'backend' not in createopts:
3670 raise error.ProgrammingError(
3670 raise error.ProgrammingError(
3671 b'backend key not present in createopts; '
3671 b'backend key not present in createopts; '
3672 b'was defaultcreateopts() called?'
3672 b'was defaultcreateopts() called?'
3673 )
3673 )
3674
3674
3675 if createopts[b'backend'] != b'revlogv1':
3675 if createopts[b'backend'] != b'revlogv1':
3676 raise error.Abort(
3676 raise error.Abort(
3677 _(
3677 _(
3678 b'unable to determine repository requirements for '
3678 b'unable to determine repository requirements for '
3679 b'storage backend: %s'
3679 b'storage backend: %s'
3680 )
3680 )
3681 % createopts[b'backend']
3681 % createopts[b'backend']
3682 )
3682 )
3683
3683
3684 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3684 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3685 if ui.configbool(b'format', b'usestore'):
3685 if ui.configbool(b'format', b'usestore'):
3686 requirements.add(requirementsmod.STORE_REQUIREMENT)
3686 requirements.add(requirementsmod.STORE_REQUIREMENT)
3687 if ui.configbool(b'format', b'usefncache'):
3687 if ui.configbool(b'format', b'usefncache'):
3688 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3688 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3689 if ui.configbool(b'format', b'dotencode'):
3689 if ui.configbool(b'format', b'dotencode'):
3690 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3690 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3691
3691
3692 compengines = ui.configlist(b'format', b'revlog-compression')
3692 compengines = ui.configlist(b'format', b'revlog-compression')
3693 for compengine in compengines:
3693 for compengine in compengines:
3694 if compengine in util.compengines:
3694 if compengine in util.compengines:
3695 engine = util.compengines[compengine]
3695 engine = util.compengines[compengine]
3696 if engine.available() and engine.revlogheader():
3696 if engine.available() and engine.revlogheader():
3697 break
3697 break
3698 else:
3698 else:
3699 raise error.Abort(
3699 raise error.Abort(
3700 _(
3700 _(
3701 b'compression engines %s defined by '
3701 b'compression engines %s defined by '
3702 b'format.revlog-compression not available'
3702 b'format.revlog-compression not available'
3703 )
3703 )
3704 % b', '.join(b'"%s"' % e for e in compengines),
3704 % b', '.join(b'"%s"' % e for e in compengines),
3705 hint=_(
3705 hint=_(
3706 b'run "hg debuginstall" to list available '
3706 b'run "hg debuginstall" to list available '
3707 b'compression engines'
3707 b'compression engines'
3708 ),
3708 ),
3709 )
3709 )
3710
3710
3711 # zlib is the historical default and doesn't need an explicit requirement.
3711 # zlib is the historical default and doesn't need an explicit requirement.
3712 if compengine == b'zstd':
3712 if compengine == b'zstd':
3713 requirements.add(b'revlog-compression-zstd')
3713 requirements.add(b'revlog-compression-zstd')
3714 elif compengine != b'zlib':
3714 elif compengine != b'zlib':
3715 requirements.add(b'exp-compression-%s' % compengine)
3715 requirements.add(b'exp-compression-%s' % compengine)
3716
3716
3717 if scmutil.gdinitconfig(ui):
3717 if scmutil.gdinitconfig(ui):
3718 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3718 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3719 if ui.configbool(b'format', b'sparse-revlog'):
3719 if ui.configbool(b'format', b'sparse-revlog'):
3720 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3720 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3721
3721
3722 # experimental config: format.use-dirstate-v2
3722 # experimental config: format.use-dirstate-v2
3723 # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
3723 # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
3724 if ui.configbool(b'format', b'use-dirstate-v2'):
3724 if ui.configbool(b'format', b'use-dirstate-v2'):
3725 requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
3725 requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
3726
3726
3727 # experimental config: format.exp-use-copies-side-data-changeset
3727 # experimental config: format.exp-use-copies-side-data-changeset
3728 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3728 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3729 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3729 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3730 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3730 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3731 if ui.configbool(b'experimental', b'treemanifest'):
3731 if ui.configbool(b'experimental', b'treemanifest'):
3732 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3732 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3733
3733
3734 changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
3734 changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
3735 if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
3735 if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
3736 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3736 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3737
3737
3738 revlogv2 = ui.config(b'experimental', b'revlogv2')
3738 revlogv2 = ui.config(b'experimental', b'revlogv2')
3739 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3739 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3740 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3740 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3741 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3741 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3742 # experimental config: format.internal-phase
3742 # experimental config: format.internal-phase
3743 if ui.configbool(b'format', b'use-internal-phase'):
3743 if ui.configbool(b'format', b'use-internal-phase'):
3744 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3744 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3745
3745
3746 # experimental config: format.exp-archived-phase
3746 # experimental config: format.exp-archived-phase
3747 if ui.configbool(b'format', b'exp-archived-phase'):
3747 if ui.configbool(b'format', b'exp-archived-phase'):
3748 requirements.add(requirementsmod.ARCHIVED_PHASE_REQUIREMENT)
3748 requirements.add(requirementsmod.ARCHIVED_PHASE_REQUIREMENT)
3749
3749
3750 if createopts.get(b'narrowfiles'):
3750 if createopts.get(b'narrowfiles'):
3751 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3751 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3752
3752
3753 if createopts.get(b'lfs'):
3753 if createopts.get(b'lfs'):
3754 requirements.add(b'lfs')
3754 requirements.add(b'lfs')
3755
3755
3756 if ui.configbool(b'format', b'bookmarks-in-store'):
3756 if ui.configbool(b'format', b'bookmarks-in-store'):
3757 requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3757 requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3758
3758
3759 # The feature is disabled unless a fast implementation is available.
3759 # The feature is disabled unless a fast implementation is available.
3760 persistent_nodemap_default = policy.importrust('revlog') is not None
3760 persistent_nodemap_default = policy.importrust('revlog') is not None
3761 if ui.configbool(
3761 if ui.configbool(
3762 b'format', b'use-persistent-nodemap', persistent_nodemap_default
3762 b'format', b'use-persistent-nodemap', persistent_nodemap_default
3763 ):
3763 ):
3764 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3764 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3765
3765
3766 # if share-safe is enabled, let's create the new repository with the new
3766 # if share-safe is enabled, let's create the new repository with the new
3767 # requirement
3767 # requirement
3768 if ui.configbool(b'format', b'use-share-safe'):
3768 if ui.configbool(b'format', b'use-share-safe'):
3769 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3769 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3770
3770
3771     # if we are creating a share-repo¹ we have to handle requirements
3771     # if we are creating a share-repo¹ we have to handle requirements
3772     # differently.
3772     # differently.
3773 #
3773 #
3774 # [1] (i.e. reusing the store from another repository, just having a
3774 # [1] (i.e. reusing the store from another repository, just having a
3775 # working copy)
3775 # working copy)
3776 if b'sharedrepo' in createopts:
3776 if b'sharedrepo' in createopts:
3777 source_requirements = set(createopts[b'sharedrepo'].requirements)
3777 source_requirements = set(createopts[b'sharedrepo'].requirements)
3778
3778
3779 if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
3779 if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
3780 # share to an old school repository, we have to copy the
3780 # share to an old school repository, we have to copy the
3781 # requirements and hope for the best.
3781 # requirements and hope for the best.
3782 requirements = source_requirements
3782 requirements = source_requirements
3783 else:
3783 else:
3784 # We have control on the working copy only, so "copy" the non
3784 # We have control on the working copy only, so "copy" the non
3785 # working copy part over, ignoring previous logic.
3785 # working copy part over, ignoring previous logic.
3786 to_drop = set()
3786 to_drop = set()
3787 for req in requirements:
3787 for req in requirements:
3788 if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
3788 if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
3789 continue
3789 continue
3790 if req in source_requirements:
3790 if req in source_requirements:
3791 continue
3791 continue
3792 to_drop.add(req)
3792 to_drop.add(req)
3793 requirements -= to_drop
3793 requirements -= to_drop
3794 requirements |= source_requirements
3794 requirements |= source_requirements
3795
3795
3796 if createopts.get(b'sharedrelative'):
3796 if createopts.get(b'sharedrelative'):
3797 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3797 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3798 else:
3798 else:
3799 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3799 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3800
3800
3801 if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
3801 if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
3802 version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
3802 version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
3803 msg = _(b"ignoring unknown tracked key version: %d\n")
3803 msg = _(b"ignoring unknown tracked key version: %d\n")
3804 hint = _(
3804 hint = _(
3805             b"see `hg help config.format.use-dirstate-tracked-hint-version`"
3805             b"see `hg help config.format.use-dirstate-tracked-hint-version`"
3806 )
3806 )
3807 if version != 1:
3807 if version != 1:
3808 ui.warn(msg % version, hint=hint)
3808 ui.warn(msg % version, hint=hint)
3809 else:
3809 else:
3810 requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)
3810 requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)
3811
3811
3812 return requirements
3812 return requirements
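# Illustrative sketch (not part of the original source): with a stock ui the
# function above typically yields the classic requirement set, for example
# (assuming `ui` is a ui instance with default configuration):
reqs = newreporequirements(ui, defaultcreateopts(ui))
assert requirementsmod.REVLOGV1_REQUIREMENT in reqs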
3813
3813
3814
3814
3815 def checkrequirementscompat(ui, requirements):
3815 def checkrequirementscompat(ui, requirements):
3816 """Checks compatibility of repository requirements enabled and disabled.
3816 """Checks compatibility of repository requirements enabled and disabled.
3817
3817
3818     Returns a set of requirements which need to be dropped because dependent
3818     Returns a set of requirements which need to be dropped because dependent
3819     requirements are not enabled. Also warns users about it."""
3819     requirements are not enabled. Also warns users about it."""
3820
3820
3821 dropped = set()
3821 dropped = set()
3822
3822
3823 if requirementsmod.STORE_REQUIREMENT not in requirements:
3823 if requirementsmod.STORE_REQUIREMENT not in requirements:
3824 if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3824 if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3825 ui.warn(
3825 ui.warn(
3826 _(
3826 _(
3827 b'ignoring enabled \'format.bookmarks-in-store\' config '
3827 b'ignoring enabled \'format.bookmarks-in-store\' config '
3828                     b'because it is incompatible with disabled '
3828                     b'because it is incompatible with disabled '
3829 b'\'format.usestore\' config\n'
3829 b'\'format.usestore\' config\n'
3830 )
3830 )
3831 )
3831 )
3832 dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3832 dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3833
3833
3834 if (
3834 if (
3835 requirementsmod.SHARED_REQUIREMENT in requirements
3835 requirementsmod.SHARED_REQUIREMENT in requirements
3836 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3836 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3837 ):
3837 ):
3838 raise error.Abort(
3838 raise error.Abort(
3839 _(
3839 _(
3840 b"cannot create shared repository as source was created"
3840 b"cannot create shared repository as source was created"
3841 b" with 'format.usestore' config disabled"
3841 b" with 'format.usestore' config disabled"
3842 )
3842 )
3843 )
3843 )
3844
3844
3845 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3845 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3846 if ui.hasconfig(b'format', b'use-share-safe'):
3846 if ui.hasconfig(b'format', b'use-share-safe'):
3847 msg = _(
3847 msg = _(
3848 b"ignoring enabled 'format.use-share-safe' config because "
3848 b"ignoring enabled 'format.use-share-safe' config because "
3849 b"it is incompatible with disabled 'format.usestore'"
3849 b"it is incompatible with disabled 'format.usestore'"
3850 b" config\n"
3850 b" config\n"
3851 )
3851 )
3852 ui.warn(msg)
3852 ui.warn(msg)
3853 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3853 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3854
3854
3855 return dropped
3855 return dropped
3856
3856
3857
3857
3858 def filterknowncreateopts(ui, createopts):
3858 def filterknowncreateopts(ui, createopts):
3859 """Filters a dict of repo creation options against options that are known.
3859 """Filters a dict of repo creation options against options that are known.
3860
3860
3861 Receives a dict of repo creation options and returns a dict of those
3861 Receives a dict of repo creation options and returns a dict of those
3862 options that we don't know how to handle.
3862 options that we don't know how to handle.
3863
3863
3864 This function is called as part of repository creation. If the
3864 This function is called as part of repository creation. If the
3865 returned dict contains any items, repository creation will not
3865 returned dict contains any items, repository creation will not
3866 be allowed, as it means there was a request to create a repository
3866 be allowed, as it means there was a request to create a repository
3867 with options not recognized by loaded code.
3867 with options not recognized by loaded code.
3868
3868
3869 Extensions can wrap this function to filter out creation options
3869 Extensions can wrap this function to filter out creation options
3870 they know how to handle.
3870 they know how to handle.
3871 """
3871 """
3872 known = {
3872 known = {
3873 b'backend',
3873 b'backend',
3874 b'lfs',
3874 b'lfs',
3875 b'narrowfiles',
3875 b'narrowfiles',
3876 b'sharedrepo',
3876 b'sharedrepo',
3877 b'sharedrelative',
3877 b'sharedrelative',
3878 b'shareditems',
3878 b'shareditems',
3879 b'shallowfilestore',
3879 b'shallowfilestore',
3880 }
3880 }
3881
3881
3882 return {k: v for k, v in createopts.items() if k not in known}
3882 return {k: v for k, v in createopts.items() if k not in known}
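# Illustrative sketch (not part of the original source): a hypothetical
# extension that knows how to handle an extra 'myfeature' creation option
# could wrap the function above so that option stops being reported as
# unknown (for instance from its uisetup()):

def wrapped_filterknowncreateopts(orig, ui, createopts):
    unknown = orig(ui, createopts)
    unknown.pop(b'myfeature', None)
    return unknown

# extensions.wrapfunction(localrepo, 'filterknowncreateopts',
#                         wrapped_filterknowncreateopts)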
3883
3883
3884
3884
3885 def createrepository(ui, path: bytes, createopts=None, requirements=None):
3885 def createrepository(ui, path: bytes, createopts=None, requirements=None):
3886 """Create a new repository in a vfs.
3886 """Create a new repository in a vfs.
3887
3887
3888 ``path`` path to the new repo's working directory.
3888 ``path`` path to the new repo's working directory.
3889 ``createopts`` options for the new repository.
3889 ``createopts`` options for the new repository.
3890     ``requirements`` predefined set of requirements.
3890     ``requirements`` predefined set of requirements.
3891 (incompatible with ``createopts``)
3891 (incompatible with ``createopts``)
3892
3892
3893 The following keys for ``createopts`` are recognized:
3893 The following keys for ``createopts`` are recognized:
3894
3894
3895 backend
3895 backend
3896 The storage backend to use.
3896 The storage backend to use.
3897 lfs
3897 lfs
3898 Repository will be created with ``lfs`` requirement. The lfs extension
3898 Repository will be created with ``lfs`` requirement. The lfs extension
3899 will automatically be loaded when the repository is accessed.
3899 will automatically be loaded when the repository is accessed.
3900 narrowfiles
3900 narrowfiles
3901 Set up repository to support narrow file storage.
3901 Set up repository to support narrow file storage.
3902 sharedrepo
3902 sharedrepo
3903 Repository object from which storage should be shared.
3903 Repository object from which storage should be shared.
3904 sharedrelative
3904 sharedrelative
3905 Boolean indicating if the path to the shared repo should be
3905 Boolean indicating if the path to the shared repo should be
3906 stored as relative. By default, the pointer to the "parent" repo
3906 stored as relative. By default, the pointer to the "parent" repo
3907 is stored as an absolute path.
3907 is stored as an absolute path.
3908 shareditems
3908 shareditems
3909 Set of items to share to the new repository (in addition to storage).
3909 Set of items to share to the new repository (in addition to storage).
3910 shallowfilestore
3910 shallowfilestore
3911 Indicates that storage for files should be shallow (not all ancestor
3911 Indicates that storage for files should be shallow (not all ancestor
3912 revisions are known).
3912 revisions are known).
3913 """
3913 """
3914
3914
3915 if requirements is not None:
3915 if requirements is not None:
3916 if createopts is not None:
3916 if createopts is not None:
3917 msg = b'cannot specify both createopts and requirements'
3917 msg = b'cannot specify both createopts and requirements'
3918 raise error.ProgrammingError(msg)
3918 raise error.ProgrammingError(msg)
3919 createopts = {}
3919 createopts = {}
3920 else:
3920 else:
3921 createopts = defaultcreateopts(ui, createopts=createopts)
3921 createopts = defaultcreateopts(ui, createopts=createopts)
3922
3922
3923 unknownopts = filterknowncreateopts(ui, createopts)
3923 unknownopts = filterknowncreateopts(ui, createopts)
3924
3924
3925 if not isinstance(unknownopts, dict):
3925 if not isinstance(unknownopts, dict):
3926 raise error.ProgrammingError(
3926 raise error.ProgrammingError(
3927 b'filterknowncreateopts() did not return a dict'
3927 b'filterknowncreateopts() did not return a dict'
3928 )
3928 )
3929
3929
3930 if unknownopts:
3930 if unknownopts:
3931 raise error.Abort(
3931 raise error.Abort(
3932 _(
3932 _(
3933 b'unable to create repository because of unknown '
3933 b'unable to create repository because of unknown '
3934 b'creation option: %s'
3934 b'creation option: %s'
3935 )
3935 )
3936 % b', '.join(sorted(unknownopts)),
3936 % b', '.join(sorted(unknownopts)),
3937 hint=_(b'is a required extension not loaded?'),
3937 hint=_(b'is a required extension not loaded?'),
3938 )
3938 )
3939
3939
3940 requirements = newreporequirements(ui, createopts=createopts)
3940 requirements = newreporequirements(ui, createopts=createopts)
3941 requirements -= checkrequirementscompat(ui, requirements)
3941 requirements -= checkrequirementscompat(ui, requirements)
3942
3942
3943 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3943 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3944
3944
3945 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3945 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3946 if hgvfs.exists():
3946 if hgvfs.exists():
3947 raise error.RepoError(_(b'repository %s already exists') % path)
3947 raise error.RepoError(_(b'repository %s already exists') % path)
3948
3948
3949 if b'sharedrepo' in createopts:
3949 if b'sharedrepo' in createopts:
3950 sharedpath = createopts[b'sharedrepo'].sharedpath
3950 sharedpath = createopts[b'sharedrepo'].sharedpath
3951
3951
3952 if createopts.get(b'sharedrelative'):
3952 if createopts.get(b'sharedrelative'):
3953 try:
3953 try:
3954 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3954 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3955 sharedpath = util.pconvert(sharedpath)
3955 sharedpath = util.pconvert(sharedpath)
3956 except (IOError, ValueError) as e:
3956 except (IOError, ValueError) as e:
3957 # ValueError is raised on Windows if the drive letters differ
3957 # ValueError is raised on Windows if the drive letters differ
3958 # on each path.
3958 # on each path.
3959 raise error.Abort(
3959 raise error.Abort(
3960 _(b'cannot calculate relative path'),
3960 _(b'cannot calculate relative path'),
3961 hint=stringutil.forcebytestr(e),
3961 hint=stringutil.forcebytestr(e),
3962 )
3962 )
3963
3963
3964 if not wdirvfs.exists():
3964 if not wdirvfs.exists():
3965 wdirvfs.makedirs()
3965 wdirvfs.makedirs()
3966
3966
3967 hgvfs.makedir(notindexed=True)
3967 hgvfs.makedir(notindexed=True)
3968 if b'sharedrepo' not in createopts:
3968 if b'sharedrepo' not in createopts:
3969 hgvfs.mkdir(b'cache')
3969 hgvfs.mkdir(b'cache')
3970 hgvfs.mkdir(b'wcache')
3970 hgvfs.mkdir(b'wcache')
3971
3971
3972 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3972 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3973 if has_store and b'sharedrepo' not in createopts:
3973 if has_store and b'sharedrepo' not in createopts:
3974 hgvfs.mkdir(b'store')
3974 hgvfs.mkdir(b'store')
3975
3975
3976 # We create an invalid changelog outside the store so very old
3976 # We create an invalid changelog outside the store so very old
3977 # Mercurial versions (which didn't know about the requirements
3977 # Mercurial versions (which didn't know about the requirements
3978 # file) encounter an error on reading the changelog. This
3978 # file) encounter an error on reading the changelog. This
3979 # effectively locks out old clients and prevents them from
3979 # effectively locks out old clients and prevents them from
3980 # mucking with a repo in an unknown format.
3980 # mucking with a repo in an unknown format.
3981 #
3981 #
3982 # The revlog header has version 65535, which won't be recognized by
3982 # The revlog header has version 65535, which won't be recognized by
3983 # such old clients.
3983 # such old clients.
3984 hgvfs.append(
3984 hgvfs.append(
3985 b'00changelog.i',
3985 b'00changelog.i',
3986 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3986 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3987 b'layout',
3987 b'layout',
3988 )
3988 )
3989
3989
3990 # Filter the requirements into working copy and store ones
3990 # Filter the requirements into working copy and store ones
3991 wcreq, storereq = scmutil.filterrequirements(requirements)
3991 wcreq, storereq = scmutil.filterrequirements(requirements)
3992 # write working copy ones
3992 # write working copy ones
3993 scmutil.writerequires(hgvfs, wcreq)
3993 scmutil.writerequires(hgvfs, wcreq)
3994 # If there are store requirements and the current repository
3994 # If there are store requirements and the current repository
3995 # is not a shared one, write stored requirements
3995 # is not a shared one, write stored requirements
3996 # For new shared repository, we don't need to write the store
3996 # For new shared repository, we don't need to write the store
3997 # requirements as they are already present in store requires
3997 # requirements as they are already present in store requires
3998 if storereq and b'sharedrepo' not in createopts:
3998 if storereq and b'sharedrepo' not in createopts:
3999 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3999 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
4000 scmutil.writerequires(storevfs, storereq)
4000 scmutil.writerequires(storevfs, storereq)
4001
4001
4002 # Write out file telling readers where to find the shared store.
4002 # Write out file telling readers where to find the shared store.
4003 if b'sharedrepo' in createopts:
4003 if b'sharedrepo' in createopts:
4004 hgvfs.write(b'sharedpath', sharedpath)
4004 hgvfs.write(b'sharedpath', sharedpath)
4005
4005
4006 if createopts.get(b'shareditems'):
4006 if createopts.get(b'shareditems'):
4007 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
4007 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
4008 hgvfs.write(b'shared', shared)
4008 hgvfs.write(b'shared', shared)
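# Illustrative sketch (not part of the original source): creating a fresh
# repository with the lfs requirement pre-enabled, assuming `ui` is a ui
# instance and the hypothetical target path does not exist yet:
createrepository(ui, b'/tmp/newrepo', createopts={b'lfs': True})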
4009
4009
4010
4010
4011 def poisonrepository(repo):
4011 def poisonrepository(repo):
4012 """Poison a repository instance so it can no longer be used."""
4012 """Poison a repository instance so it can no longer be used."""
4013 # Perform any cleanup on the instance.
4013 # Perform any cleanup on the instance.
4014 repo.close()
4014 repo.close()
4015
4015
4016 # Our strategy is to replace the type of the object with one that
4016 # Our strategy is to replace the type of the object with one that
4017 # has all attribute lookups result in error.
4017 # has all attribute lookups result in error.
4018 #
4018 #
4019 # But we have to allow the close() method because some constructors
4019 # But we have to allow the close() method because some constructors
4020 # of repos call close() on repo references.
4020 # of repos call close() on repo references.
4021 class poisonedrepository:
4021 class poisonedrepository:
4022 def __getattribute__(self, item):
4022 def __getattribute__(self, item):
4023 if item == 'close':
4023 if item == 'close':
4024 return object.__getattribute__(self, item)
4024 return object.__getattribute__(self, item)
4025
4025
4026 raise error.ProgrammingError(
4026 raise error.ProgrammingError(
4027 b'repo instances should not be used after unshare'
4027 b'repo instances should not be used after unshare'
4028 )
4028 )
4029
4029
4030 def close(self):
4030 def close(self):
4031 pass
4031 pass
4032
4032
4033 # We may have a repoview, which intercepts __setattr__. So be sure
4033 # We may have a repoview, which intercepts __setattr__. So be sure
4034 # we operate at the lowest level possible.
4034 # we operate at the lowest level possible.
4035 object.__setattr__(repo, '__class__', poisonedrepository)
4035 object.__setattr__(repo, '__class__', poisonedrepository)
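# Illustrative sketch (not part of the original source): once poisoned, only
# close() keeps working; any other attribute access raises, e.g. (assuming
# `repo` was a previously valid repository object):
poisonrepository(repo)
repo.close()  # still permitted
# repo.root   # would raise error.ProgrammingError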