localrepo: subclass the new `repository.ipeercommandexecutor` Protocol class...
Matt Harbison
r53393:e123c8a2 default
@@ -1,2360 +1,2363 @@
# repository.py - Interfaces and base classes for repositories and peers.
# coding: utf-8
#
# Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import annotations

import abc
import typing

from typing import (
    Any,
    Callable,
    Collection,
    Iterable,
    Iterator,
    Mapping,
    Protocol,
    Set,
)

from ..i18n import _
from .. import error

if typing.TYPE_CHECKING:
    from typing import (
        ByteString,  # TODO: change to Buffer for 3.14
    )

    # Almost all mercurial modules are only imported in the type checking phase
    # to avoid circular imports
    from .. import (
        match as matchmod,
        pathutil,
        util,
    )
    from ..utils import (
        urlutil,
    )

    from . import dirstate as intdirstate

# TODO: make a protocol class for this
NodeConstants = Any

# TODO: create a Protocol class, since importing uimod here causes a cycle
# that confuses pytype.
Ui = Any

# TODO: make a protocol class for this
Vfs = Any

# Local repository feature string.

# Revlogs are being used for file storage.
REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
# The storage part of the repository is shared from an external source.
REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
# LFS supported for backing file storage.
REPO_FEATURE_LFS = b'lfs'
# Repository supports being stream cloned.
REPO_FEATURE_STREAM_CLONE = b'streamclone'
# Repository supports (at least) some sidedata to be stored
REPO_FEATURE_SIDE_DATA = b'side-data'
# Files storage may lack data for all ancestors.
REPO_FEATURE_SHALLOW_FILE_STORAGE = b'shallowfilestorage'

REVISION_FLAG_CENSORED = 1 << 15
REVISION_FLAG_ELLIPSIS = 1 << 14
REVISION_FLAG_EXTSTORED = 1 << 13
REVISION_FLAG_HASCOPIESINFO = 1 << 12

REVISION_FLAGS_KNOWN = (
    REVISION_FLAG_CENSORED
    | REVISION_FLAG_ELLIPSIS
    | REVISION_FLAG_EXTSTORED
    | REVISION_FLAG_HASCOPIESINFO
)
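

# A hedged sketch, not part of the original module: rejecting revision flags
# outside the known set, using the bitwise composition defined above.
def _example_check_flags(flags: int) -> None:
    unknown = flags & ~REVISION_FLAGS_KNOWN
    if unknown:
        raise ValueError('unsupported revision flags: 0x%x' % unknown)
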

CG_DELTAMODE_STD = b'default'
CG_DELTAMODE_PREV = b'previous'
CG_DELTAMODE_FULL = b'fulltext'
CG_DELTAMODE_P1 = b'p1'


## Cache related constants:
#
# Used to control which cache should be warmed in a repo.updatecaches(…) call.

# Warm branchmaps for all known repoview filter-levels
CACHE_BRANCHMAP_ALL = b"branchmap-all"
# Warm branchmaps for the repoview filter-level used by the server
CACHE_BRANCHMAP_SERVED = b"branchmap-served"
# Warm internal changelog cache (eg: persistent nodemap)
CACHE_CHANGELOG_CACHE = b"changelog-cache"
# Check if a branchmap can use the "pure topo" mode
CACHE_BRANCHMAP_DETECT_PURE_TOPO = b"branchmap-detect-pure-topo"
# Warm full manifest cache
CACHE_FULL_MANIFEST = b"full-manifest"
# Warm file-node-tags cache
CACHE_FILE_NODE_TAGS = b"file-node-tags"
# Warm internal manifestlog cache (eg: persistent nodemap)
CACHE_MANIFESTLOG_CACHE = b"manifestlog-cache"
# Warm rev branch cache
CACHE_REV_BRANCH = b"rev-branch-cache"
# Warm tags' cache for the default repoview
CACHE_TAGS_DEFAULT = b"tags-default"
# Warm tags' cache for the repoview filter-level used by the server
CACHE_TAGS_SERVED = b"tags-served"

# the caches to warm by default after a simple transaction
# (this is a mutable set to let extensions update it)
CACHES_DEFAULT = {
    CACHE_BRANCHMAP_SERVED,
}

# the caches to warm when warming all of them
# (this is a mutable set to let extensions update it)
CACHES_ALL = {
    CACHE_BRANCHMAP_SERVED,
    CACHE_BRANCHMAP_ALL,
    CACHE_BRANCHMAP_DETECT_PURE_TOPO,
    CACHE_REV_BRANCH,
    CACHE_CHANGELOG_CACHE,
    CACHE_FILE_NODE_TAGS,
    CACHE_FULL_MANIFEST,
    CACHE_MANIFESTLOG_CACHE,
    CACHE_TAGS_DEFAULT,
    CACHE_TAGS_SERVED,
}

# the caches to warm by default after a clone
# (this is a mutable set to let extensions update it)
CACHES_POST_CLONE = CACHES_ALL.copy()
CACHES_POST_CLONE.discard(CACHE_FILE_NODE_TAGS)
CACHES_POST_CLONE.discard(CACHE_REV_BRANCH)
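

# A hedged sketch, not part of the original module: since the cache sets
# above are deliberately mutable, an extension could register its own cache
# key for warming. ``cache_name`` is a hypothetical extension-defined value.
def _example_register_cache(cache_name: bytes) -> None:
    CACHES_ALL.add(cache_name)
    CACHES_DEFAULT.add(cache_name)  # also warm it after simple transactions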


class _ipeerconnection(Protocol):
    """Represents a "connection" to a repository.

    This is the base interface for representing a connection to a repository.
    It holds basic properties and methods applicable to all peer types.

    This is not a complete interface definition and should not be used
    outside of this module.
    """

    ui: Ui
    """ui.ui instance"""

    path: urlutil.path | None
    """a urlutil.path instance or None"""

    def url(self):
        """Returns a URL string representing this peer.

        Currently, implementations expose the raw URL used to construct the
        instance. It may contain credentials as part of the URL. The
        expectations of the value aren't well-defined and this could lead to
        data leakage.

        TODO audit/clean consumers and more clearly define the contents of this
        value.
        """

    def local(self):
        """Returns a local repository instance.

        If the peer represents a local repository, returns an object that
        can be used to interface with it. Otherwise returns ``None``.
        """

    def canpush(self):
        """Returns a boolean indicating if this peer can be pushed to."""

    def close(self):
        """Close the connection to this peer.

        This is called when the peer will no longer be used. Resources
        associated with the peer should be cleaned up.
        """


class ipeercapabilities(Protocol):
    """Peer sub-interface related to capabilities."""

    def capable(self, name):
        """Determine support for a named capability.

        Returns ``False`` if capability not supported.

        Returns ``True`` if boolean capability is supported. Returns a string
        if capability support is non-boolean.

        Capability strings may or may not map to wire protocol capabilities.
        """

    def requirecap(self, name, purpose):
        """Require a capability to be present.

        Raises a ``CapabilityError`` if the capability isn't present.
        """


class ipeercommands(Protocol):
    """Client-side interface for communicating over the wire protocol.

    This interface is used as a gateway to the Mercurial wire protocol.
    Methods commonly call wire protocol commands of the same name.
    """

    def branchmap(self):
        """Obtain heads in named branches.

        Returns a dict mapping branch name to an iterable of nodes that are
        heads on that branch.
        """

    def capabilities(self):
        """Obtain capabilities of the peer.

        Returns a set of string capabilities.
        """

    def get_cached_bundle_inline(self, path):
        """Retrieve a clonebundle across the wire.

        Returns a chunkbuffer
        """

    def clonebundles(self):
        """Obtains the clone bundles manifest for the repo.

        Returns the manifest as unparsed bytes.
        """

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to facilitate debugging of arguments passed over the wire."""

    def getbundle(self, source, **kwargs):
        """Obtain remote repository data as a bundle.

        This command is how the bulk of repository data is transferred from
        the peer to the local repository.

        Returns a generator of bundle data.
        """

    def heads(self):
        """Determine all known head revisions in the peer.

        Returns an iterable of binary nodes.
        """

    def known(self, nodes):
        """Determine whether multiple nodes are known.

        Accepts an iterable of nodes whose presence to check for.

        Returns an iterable of booleans indicating whether the corresponding
        node at that index is known to the peer.
        """

    def listkeys(self, namespace):
        """Obtain all keys in a pushkey namespace.

        Returns an iterable of key names.
        """

    def lookup(self, key):
        """Resolve a value to a known revision.

        Returns a binary node of the resolved revision on success.
        """

    def pushkey(self, namespace, key, old, new):
        """Set a value using the ``pushkey`` protocol.

        Arguments correspond to the pushkey namespace and key to operate on and
        the old and new values for that key.

        Returns a string with the peer result. The value inside varies by the
        namespace.
        """

    def stream_out(self):
        """Obtain streaming clone data.

        Successful result should be a generator of data chunks.
        """

    def unbundle(self, bundle, heads, url):
        """Transfer repository data to the peer.

        This is how the bulk of data during a push is transferred.

        Returns the integer number of heads added to the peer.
        """


class ipeerlegacycommands(Protocol):
    """Interface for implementing support for legacy wire protocol commands.

    Wire protocol commands transition to legacy status when they are no longer
    used by modern clients. To facilitate identifying which commands are
    legacy, the interfaces are split.
    """

    def between(self, pairs):
        """Obtain nodes between pairs of nodes.

        ``pairs`` is an iterable of node pairs.

        Returns an iterable of iterables of nodes corresponding to each
        requested pair.
        """

    def branches(self, nodes):
        """Obtain ancestor changesets of specific nodes back to a branch point.

        For each requested node, the peer finds the first ancestor node that is
        a DAG root or is a merge.

        Returns an iterable of iterables with the resolved values for each node.
        """

    def changegroup(self, nodes, source):
        """Obtain a changegroup with data for descendants of specified nodes."""

    def changegroupsubset(self, bases, heads, source):
        pass


class ipeercommandexecutor(Protocol):
    """Represents a mechanism to execute remote commands.

    This is the primary interface for requesting that wire protocol commands
    be executed. Instances of this interface are active in a context manager
    and have a well-defined lifetime. When the context manager exits, all
    outstanding requests are waited on.
    """

    @abc.abstractmethod
    def callcommand(self, name, args):
        """Request that a named command be executed.

        Receives the command name and a dictionary of command arguments.

        Returns a ``concurrent.futures.Future`` that will resolve to the
        result of that command request. That exact value is left up to
        the implementation and possibly varies by command.

        Not all commands can coexist with other commands in an executor
        instance: it depends on the underlying wire protocol transport being
        used and the command itself.

        Implementations MAY call ``sendcommands()`` automatically if the
        requested command cannot coexist with other commands in this executor.

        Implementations MAY call ``sendcommands()`` automatically when the
        future's ``result()`` is called. So, consumers using multiple
        commands with an executor MUST ensure that ``result()`` is not called
        until all command requests have been issued.
        """

    @abc.abstractmethod
    def sendcommands(self):
        """Trigger submission of queued command requests.

        Not all transports submit commands as soon as they are requested to
        run. When called, this method forces queued command requests to be
        issued. It will no-op if all commands have already been sent.

        When called, no more new commands may be issued with this executor.
        """

    @abc.abstractmethod
    def close(self):
        """Signal that this command request is finished.

        When called, no more new commands may be issued. All outstanding
        commands that have previously been issued are waited on before
        returning. This not only includes waiting for the futures to resolve,
        but also waiting for all response data to arrive. In other words,
        calling this waits for all on-wire state for issued command requests
        to finish.

        When used as a context manager, this method is called when exiting the
        context manager.

        This method may call ``sendcommands()`` if there are buffered commands.
        """


class ipeerrequests(Protocol):
    """Interface for executing commands on a peer."""

    limitedarguments: bool
    """True if the peer cannot receive large argument values for commands."""

    def commandexecutor(self):
        """A context manager that resolves to an ipeercommandexecutor.

        The object this resolves to can be used to issue command requests
        to the peer.

        Callers should call its ``callcommand`` method to issue command
        requests.

        A new executor should be obtained for each distinct set of commands
        (possibly just a single command) that the consumer wants to execute
        as part of a single operation or round trip. This is because some
        peers are half-duplex and/or don't support persistent connections.
        e.g. in the case of HTTP peers, commands sent to an executor represent
        a single HTTP request. While some peers may support multiple command
        sends over the wire per executor, consumers need to code to the least
        capable peer. So it should be assumed that command executors buffer
        called commands until they are told to send them and that each
        command executor could result in a new connection or wire-level request
        being issued.
        """


class peer(_ipeerconnection, ipeercapabilities, ipeerrequests, Protocol):
    """Unified interface for peer repositories.

    All peer instances must conform to this interface.
    """

    limitedarguments: bool = False

    def __init__(self, ui, path=None, remotehidden=False):
        self.ui = ui
        self.path = path

    def capable(self, name):
        # TODO: this class should maybe subclass ipeercommands too, otherwise it
        # is assuming whatever uses this as a mixin also has this interface.
        caps = self.capabilities()  # pytype: disable=attribute-error
        if name in caps:
            return True

        name = b'%s=' % name
        for cap in caps:
            if cap.startswith(name):
                return cap[len(name) :]

        return False

    def requirecap(self, name, purpose):
        if self.capable(name):
            return

        raise error.CapabilityError(
            _(
                b'cannot %s; remote repository does not support the '
                b'\'%s\' capability'
            )
            % (purpose, name)
        )
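

# A hedged sketch of the three possible ``capable()`` outcomes implemented
# above; the capability names used here are illustrative assumptions.
def _example_capability_checks(p):
    can_unbundle = p.capable(b'unbundle')  # True for a boolean capability
    bundle2 = p.capable(b'bundle2')  # bytes after b'bundle2=' when non-boolean
    missing = p.capable(b'no-such-cap')  # False when unsupported
    return can_unbundle, bundle2, missing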


class iverifyproblem(Protocol):
    """Represents a problem with the integrity of the repository.

    Instances of this interface are emitted to describe an integrity issue
    with a repository (e.g. corrupt storage, missing data, etc).

    Instances are essentially messages associated with severity.
    """

    warning: bytes | None
    """Message indicating a non-fatal problem."""

    error: bytes | None
    """Message indicating a fatal problem."""

    node: bytes | None
    """Revision encountering the problem.

    ``None`` means the problem doesn't apply to a single revision.
    """


class irevisiondelta(Protocol):
    """Represents a delta between one revision and another.

    Instances convey enough information to allow a revision to be exchanged
    with another repository.

    Instances represent the fulltext revision data or a delta against
    another revision. Therefore the ``revision`` and ``delta`` attributes
    are mutually exclusive.

    Typically used for changegroup generation.
    """

    node: bytes
    """20 byte node of this revision."""

    p1node: bytes
    """20 byte node of 1st parent of this revision."""

    p2node: bytes
    """20 byte node of 2nd parent of this revision."""

    # TODO: is this really optional? revlog.revlogrevisiondelta defaults to None
    linknode: bytes | None
    """20 byte node of the changelog revision this node is linked to."""

    flags: int
    """2 bytes of integer flags that apply to this revision.

    This is a bitwise composition of the ``REVISION_FLAG_*`` constants.
    """

    basenode: bytes
    """20 byte node of the revision this data is a delta against.

    ``nullid`` indicates that the revision is a full revision and not
    a delta.
    """

    baserevisionsize: int | None
    """Size of base revision this delta is against.

    May be ``None`` if ``basenode`` is ``nullid``.
    """

    # TODO: is this really optional? (Seems possible in
    # storageutil.emitrevisions()).
    revision: bytes | None
    """Raw fulltext of revision data for this node."""

    delta: bytes | None
    """Delta between ``basenode`` and ``node``.

    Stored in the bdiff delta format.
    """

    sidedata: bytes | None
    """Raw sidedata bytes for the given revision."""

    protocol_flags: int
    """Single byte of integer flags that can influence the protocol.

    This is a bitwise composition of the ``storageutil.CG_FLAG*`` constants.
    """


class ifilerevisionssequence(Protocol):
    """Contains index data for all revisions of a file.

    Types implementing this behave like lists of tuples. The index
    in the list corresponds to the revision number. The values contain
    index metadata.

    The *null* revision (revision number -1) is always the last item
    in the index.
    """

    def __len__(self):
        """The total number of revisions."""

    def __getitem__(self, rev):
        """Returns the object having a specific revision number.

        Returns an 8-tuple with the following fields:

        offset+flags
            Contains the offset and flags for the revision. 64-bit unsigned
            integer where first 6 bytes are the offset and the next 2 bytes
            are flags. The offset can be 0 if it is not used by the store.
        compressed size
            Size of the revision data in the store. It can be 0 if it isn't
            needed by the store.
        uncompressed size
            Fulltext size. It can be 0 if it isn't needed by the store.
        base revision
            Revision number of revision the delta for storage is encoded
            against. -1 indicates not encoded against a base revision.
        link revision
            Revision number of changelog revision this entry is related to.
        p1 revision
            Revision number of 1st parent. -1 if no 1st parent.
        p2 revision
            Revision number of 2nd parent. -1 if no 2nd parent.
        node
            Binary node value for this revision number.

        Negative values should index off the end of the sequence. ``-1``
        should return the null revision. ``-2`` should return the most
        recent revision.
        """

    def __contains__(self, rev):
        """Whether a revision number exists."""

    def insert(self, i, entry):
        """Add an item to the index at specific revision."""


class ifileindex(Protocol):
    """Storage interface for index data of a single file.

    File storage data is divided into index metadata and data storage.
    This interface defines the index portion of the interface.

    The index logically consists of:

    * A mapping between revision numbers and nodes.
    * DAG data (storing and querying the relationship between nodes).
    * Metadata to facilitate storage.
    """

    nullid: bytes
    """node for the null revision for use as delta base."""

    @abc.abstractmethod
    def __len__(self) -> int:
        """Obtain the number of revisions stored for this file."""

    @abc.abstractmethod
    def __iter__(self) -> Iterator[int]:
        """Iterate over revision numbers for this file."""

    @abc.abstractmethod
    def hasnode(self, node):
        """Returns a bool indicating if a node is known to this store.

        Implementations must only return True for full, binary node values:
        hex nodes, revision numbers, and partial node matches must be
        rejected.

        The null node is never present.
        """

    @abc.abstractmethod
    def revs(self, start=0, stop=None):
        """Iterate over revision numbers for this file, with control."""

    @abc.abstractmethod
    def parents(self, node):
        """Returns a 2-tuple of parent nodes for a revision.

        Values will be ``nullid`` if the parent is empty.
        """

    @abc.abstractmethod
    def parentrevs(self, rev):
        """Like parents() but operates on revision numbers."""

    @abc.abstractmethod
    def rev(self, node):
        """Obtain the revision number given a node.

        Raises ``error.LookupError`` if the node is not known.
        """

    @abc.abstractmethod
    def node(self, rev):
        """Obtain the node value given a revision number.

        Raises ``IndexError`` if the node is not known.
        """

    @abc.abstractmethod
    def lookup(self, node):
        """Attempt to resolve a value to a node.

        Value can be a binary node, hex node, revision number, or a string
        that can be converted to an integer.

        Raises ``error.LookupError`` if a node could not be resolved.
        """

    @abc.abstractmethod
    def linkrev(self, rev):
        """Obtain the changeset revision number a revision is linked to."""

    @abc.abstractmethod
    def iscensored(self, rev):
        """Return whether a revision's content has been censored."""

    @abc.abstractmethod
    def commonancestorsheads(self, node1, node2):
        """Obtain an iterable of nodes containing heads of common ancestors.

        See ``ancestor.commonancestorsheads()``.
        """

    @abc.abstractmethod
    def descendants(self, revs):
        """Obtain descendant revision numbers for a set of revision numbers.

        If ``nullrev`` is in the set, this is equivalent to ``revs()``.
        """

    @abc.abstractmethod
    def heads(self, start=None, stop=None):
        """Obtain a list of nodes that are DAG heads, with control.

        The set of revisions examined can be limited by specifying
        ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
        iterable of nodes. DAG traversal starts at earlier revision
        ``start`` and iterates forward until any node in ``stop`` is
        encountered.
        """

    @abc.abstractmethod
    def children(self, node):
        """Obtain nodes that are children of a node.

        Returns a list of nodes.
        """


class ifiledata(Protocol):
    """Storage interface for data storage of a specific file.

    This complements ``ifileindex`` and provides an interface for accessing
    data for a tracked file.
    """

    @abc.abstractmethod
    def size(self, rev):
        """Obtain the fulltext size of file data.

        Any metadata is excluded from size measurements.
        """

    @abc.abstractmethod
    def revision(self, node):
        """Obtain fulltext data for a node.

        Any storage transformations are applied before the data is
        returned; ``rawdata()`` can be used to obtain the data without
        non-raw storage transformations applied.

        The fulltext data may contain a header containing metadata. Most
        consumers should use ``read()`` to obtain the actual file data.
        """

    @abc.abstractmethod
    def rawdata(self, node):
        """Obtain raw data for a node."""

    @abc.abstractmethod
    def read(self, node):
        """Resolve file fulltext data.

        This is similar to ``revision()`` except any metadata in the data
        headers is stripped.
        """

    @abc.abstractmethod
    def renamed(self, node):
        """Obtain copy metadata for a node.

        Returns ``False`` if no copy metadata is stored or a 2-tuple of
        (path, node) from which this revision was copied.
        """

    @abc.abstractmethod
    def cmp(self, node, fulltext):
        """Compare fulltext to another revision.

        Returns True if the fulltext is different from what is stored.

        This takes copy metadata into account.

        TODO better document the copy metadata and censoring logic.
        """

    @abc.abstractmethod
    def emitrevisions(
        self,
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
        deltamode=CG_DELTAMODE_STD,
    ):
        """Produce ``irevisiondelta`` for revisions.

        Given an iterable of nodes, emits objects conforming to the
        ``irevisiondelta`` interface that describe revisions in storage.

        This method is a generator.

        The input nodes may be unordered. Implementations must ensure that a
        node's parents are emitted before the node itself. Transitively, this
        means that a node may only be emitted once all its ancestors in
        ``nodes`` have also been emitted.

        By default, emits "index" data (the ``node``, ``p1node``, and
        ``p2node`` attributes). If ``revisiondata`` is set, revision data
        will also be present on the emitted objects.

        With default argument values, implementations can choose to emit
        either fulltext revision data or a delta. When emitting deltas,
        implementations must consider whether the delta's base revision
        fulltext is available to the receiver.

        The base revision fulltext is guaranteed to be available if any of
        the following are met:

        * Its fulltext revision was emitted by this method call.
        * A delta for that revision was emitted by this method call.
        * ``assumehaveparentrevisions`` is True and the base revision is a
          parent of the node.

        ``nodesorder`` can be used to control the order that revisions are
        emitted. By default, revisions can be reordered as long as they are
        in DAG topological order (see above). If the value is ``nodes``,
        the iteration order from ``nodes`` should be used. If the value is
        ``storage``, then the native order from the backing storage layer
        is used. (Not all storage layers will have strong ordering and behavior
        of this mode is storage-dependent.) ``nodes`` ordering can force
        revisions to be emitted before their ancestors, so consumers should
        use it with care.

        The ``linknode`` attribute on the returned ``irevisiondelta`` may not
        be set and it is the caller's responsibility to resolve it, if needed.

        If ``deltamode`` is CG_DELTAMODE_PREV and revision data is requested,
        all revision data should be emitted as deltas against the revision
        emitted just prior. The initial revision should be a delta against its
        1st parent.
        """
832
835
833
836
834 class ifilemutation(Protocol):
837 class ifilemutation(Protocol):
835 """Storage interface for mutation events of a tracked file."""
838 """Storage interface for mutation events of a tracked file."""
836
839
837 @abc.abstractmethod
840 @abc.abstractmethod
838 def add(self, filedata, meta, transaction, linkrev, p1, p2):
841 def add(self, filedata, meta, transaction, linkrev, p1, p2):
839 """Add a new revision to the store.
842 """Add a new revision to the store.
840
843
841 Takes file data, dictionary of metadata, a transaction, linkrev,
844 Takes file data, dictionary of metadata, a transaction, linkrev,
842 and parent nodes.
845 and parent nodes.
843
846
844 Returns the node that was added.
847 Returns the node that was added.
845
848
846 May no-op if a revision matching the supplied data is already stored.
849 May no-op if a revision matching the supplied data is already stored.
847 """
850 """
848
851
849 @abc.abstractmethod
852 @abc.abstractmethod
850 def addrevision(
853 def addrevision(
851 self,
854 self,
852 revisiondata,
855 revisiondata,
853 transaction,
856 transaction,
854 linkrev,
857 linkrev,
855 p1,
858 p1,
856 p2,
859 p2,
857 node=None,
860 node=None,
858 flags=0,
861 flags=0,
859 cachedelta=None,
862 cachedelta=None,
860 ):
863 ):
861 """Add a new revision to the store and return its number.
864 """Add a new revision to the store and return its number.
862
865
863 This is similar to ``add()`` except it operates at a lower level.
866 This is similar to ``add()`` except it operates at a lower level.
864
867
865 The data passed in already contains a metadata header, if any.
868 The data passed in already contains a metadata header, if any.
866
869
867 ``node`` and ``flags`` can be used to define the expected node and
870 ``node`` and ``flags`` can be used to define the expected node and
868 the flags to use with storage. ``flags`` is a bitwise value composed
871 the flags to use with storage. ``flags`` is a bitwise value composed
869 of the various ``REVISION_FLAG_*`` constants.
872 of the various ``REVISION_FLAG_*`` constants.
870
873
871 ``add()`` is usually called when adding files from e.g. the working
874 ``add()`` is usually called when adding files from e.g. the working
872 directory. ``addrevision()`` is often called by ``add()`` and for
875 directory. ``addrevision()`` is often called by ``add()`` and for
873 scenarios where revision data has already been computed, such as when
876 scenarios where revision data has already been computed, such as when
874 applying raw data from a peer repo.
877 applying raw data from a peer repo.
875 """
878 """
876
879
    @abc.abstractmethod
    def addgroup(
        self,
        deltas,
        linkmapper,
        transaction,
        addrevisioncb=None,
        duplicaterevisioncb=None,
        maybemissingparents=False,
    ):
        """Process a series of deltas for storage.

        ``deltas`` is an iterable of 7-tuples of
        (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
        to add.

        The ``delta`` field contains ``mpatch`` data to apply to a base
        revision, identified by ``deltabase``. The base node can be
        ``nullid``, in which case the header from the delta can be ignored
        and the delta used as the fulltext.

        ``alwayscache`` instructs the lower layers to cache the content of the
        newly added revision, even if it needs to be explicitly computed.
        This used to be the default when ``addrevisioncb`` was provided up to
        Mercurial 5.8.

        ``addrevisioncb`` should be called for each new rev as it is committed.
        ``duplicaterevisioncb`` should be called for all revs with a
        pre-existing node.

        ``maybemissingparents`` is a bool indicating whether the incoming
        data may reference parents/ancestor revisions that aren't present.
        This flag is set when receiving data into a "shallow" store that
        doesn't hold all history.

        Returns a list of nodes that were processed. A node will be in the list
        even if it existed in the store previously.
        """

    @abc.abstractmethod
    def censorrevision(self, tr, node, tombstone=b''):
        """Remove the content of a single revision.

        The specified ``node`` will have its content purged from storage.
        Future attempts to access the revision data for this node will
        result in failure.

        A ``tombstone`` message can optionally be stored. This message may be
        displayed to users when they attempt to access the missing revision
        data.

        Storage backends may have stored deltas against the previous content
        in this revision. As part of censoring a revision, these storage
        backends are expected to rewrite any internally stored deltas such
        that they no longer reference the deleted content.
        """

    @abc.abstractmethod
    def getstrippoint(self, minlink):
        """Find the minimum revision that must be stripped to strip a linkrev.

        Returns a 2-tuple containing the minimum revision number and a set
        of all revision numbers that would be broken by this strip.

        TODO this is highly revlog centric and should be abstracted into
        a higher-level deletion API. ``repair.strip()`` relies on this.
        """

    @abc.abstractmethod
    def strip(self, minlink, transaction):
        """Remove storage of items starting at a linkrev.

        This uses ``getstrippoint()`` to determine the first node to remove.
        Then it effectively truncates storage for all revisions after that.

        TODO this is highly revlog centric and should be abstracted into a
        higher-level deletion API.
        """


class ifilestorage(ifileindex, ifiledata, ifilemutation, Protocol):
    """Complete storage interface for a single tracked file."""

    @abc.abstractmethod
    def files(self):
        """Obtain paths that are backing storage for this file.

        TODO this is used heavily by verify code and there should probably
        be a better API for that.
        """

    @abc.abstractmethod
    def storageinfo(
        self,
        exclusivefiles=False,
        sharedfiles=False,
        revisionscount=False,
        trackedsize=False,
        storedsize=False,
    ):
        """Obtain information about storage for this file's data.

        Returns a dict describing storage for this tracked path. The keys
        in the dict map to arguments of the same name. The arguments are
        bools indicating whether to calculate and obtain that data.

        exclusivefiles
           Iterable of (vfs, path) describing files that are exclusively
           used to back storage for this tracked path.

        sharedfiles
           Iterable of (vfs, path) describing files that are used to back
           storage for this tracked path. Those files may also provide storage
           for other stored entities.

        revisionscount
           Number of revisions available for retrieval.

        trackedsize
           Total size in bytes of all tracked revisions. This is a sum of the
           length of the fulltext of all revisions.

        storedsize
           Total size in bytes used to store data for all tracked revisions.
           This is commonly less than ``trackedsize`` due to internal usage
           of deltas rather than fulltext revisions.

        Not all storage backends may support all queries or have a reasonable
        value to use. In that case, the value should be set to ``None`` and
        callers are expected to handle this special value.
        """

    @abc.abstractmethod
    def verifyintegrity(self, state) -> Iterable[iverifyproblem]:
        """Verifies the integrity of file storage.

        ``state`` is a dict holding state of the verifier process. It can be
        used to communicate data between invocations of multiple storage
        primitives.

        If individual revisions cannot have their revision content resolved,
        the method is expected to set the ``skipread`` key to a set of nodes
        that encountered problems. If set, the method can also add the node(s)
        to ``safe_renamed`` in order to indicate nodes that may perform the
        rename checks with currently accessible data.

        The method yields objects conforming to the ``iverifyproblem``
        interface.
        """


class idirs(Protocol):
    """Interface representing a collection of directories from paths.

    This interface is essentially a derived data structure representing
    directories from a collection of paths.
    """

    def addpath(self, path):
        """Add a path to the collection.

        All directories in the path will be added to the collection.
        """

    def delpath(self, path):
        """Remove a path from the collection.

        If the removal was the last path in a particular directory, the
        directory is removed from the collection.
        """

    def __iter__(self):
        """Iterate over the directories in this collection of paths."""

    def __contains__(self, path):
        """Whether a specific directory is in this collection."""


class imanifestdict(Protocol):
    """Interface representing a manifest data structure.

    A manifest is effectively a dict mapping paths to entries. Each entry
    consists of a binary node and extra flags affecting that entry.
    """

    @abc.abstractmethod
    def __getitem__(self, key: bytes) -> bytes:
        """Returns the binary node value for a path in the manifest.

        Raises ``KeyError`` if the path does not exist in the manifest.

        Equivalent to ``self.find(path)[0]``.
        """

    @abc.abstractmethod
    def find(self, path: bytes) -> tuple[bytes, bytes]:
        """Returns the entry for a path in the manifest.

        Returns a 2-tuple of (node, flags).

        Raises ``KeyError`` if the path does not exist in the manifest.
        """

    @abc.abstractmethod
    def __len__(self) -> int:
        """Return the number of entries in the manifest."""

    @abc.abstractmethod
    def __nonzero__(self) -> bool:
        """Returns True if the manifest has entries, False otherwise."""

    __bool__ = __nonzero__

    @abc.abstractmethod
    def set(self, path: bytes, node: bytes, flags: bytes) -> None:
        """Define the node value and flags for a path in the manifest.

        Equivalent to __setitem__ followed by setflag, but can be more efficient.
        """

    @abc.abstractmethod
    def __setitem__(self, path: bytes, node: bytes) -> None:
        """Define the node value for a path in the manifest.

        If the path is already in the manifest, its flags will be copied to
        the new entry.
        """

    @abc.abstractmethod
    def __contains__(self, path: bytes) -> bool:
        """Whether a path exists in the manifest."""

    @abc.abstractmethod
    def __delitem__(self, path: bytes) -> None:
        """Remove a path from the manifest.

        Raises ``KeyError`` if the path is not in the manifest.
        """

    @abc.abstractmethod
    def __iter__(self) -> Iterator[bytes]:
        """Iterate over paths in the manifest."""

    @abc.abstractmethod
    def iterkeys(self) -> Iterator[bytes]:
        """Iterate over paths in the manifest."""

    @abc.abstractmethod
    def keys(self) -> list[bytes]:
        """Obtain a list of paths in the manifest."""

    @abc.abstractmethod
    def filesnotin(self, other, match=None) -> Set[bytes]:
        """Obtain the set of paths in this manifest but not in another.

        ``match`` is an optional matcher function to be applied to both
        manifests.

        Returns a set of paths.
        """

    @abc.abstractmethod
    def dirs(self) -> pathutil.dirs:
        """Returns an object implementing the ``idirs`` interface."""

    @abc.abstractmethod
    def hasdir(self, dir: bytes) -> bool:
        """Returns a bool indicating if a directory is in this manifest."""

    @abc.abstractmethod
    def walk(self, match: matchmod.basematcher) -> Iterator[bytes]:
        """Generator of paths in manifest satisfying a matcher.

        If the matcher has explicit files listed and they don't exist in
        the manifest, ``match.bad()`` is called for each missing file.
        """

    @abc.abstractmethod
    def diff(
        self,
        other: Any,  # TODO: 'manifestdict' or (better) equivalent interface
        match: matchmod.basematcher | None = None,
        clean: bool = False,
    ) -> dict[
        bytes,
        tuple[tuple[bytes | None, bytes], tuple[bytes | None, bytes]] | None,
    ]:
        """Find differences between this manifest and another.

        This manifest is compared to ``other``.

        If ``match`` is provided, the two manifests are filtered against this
        matcher and only entries satisfying the matcher are compared.

        If ``clean`` is True, unchanged files are included in the returned
        object.

        Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
        the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
        represents the node and flags for this manifest and ``(node2, flag2)``
        are the same for the other manifest.
        """

    @abc.abstractmethod
    def setflag(self, path: bytes, flag: bytes) -> None:
        """Set the flag value for a given path.

        Raises ``KeyError`` if the path is not already in the manifest.
        """

    @abc.abstractmethod
    def get(self, path: bytes, default=None) -> bytes | None:
        """Obtain the node value for a path or a default value if missing."""

    @abc.abstractmethod
    def flags(self, path: bytes) -> bytes:
        """Return the flags value for a path (default: empty bytestring)."""

    @abc.abstractmethod
    def copy(self) -> 'imanifestdict':
        """Return a copy of this manifest."""

    @abc.abstractmethod
    def items(self) -> Iterator[tuple[bytes, bytes]]:
        """Returns an iterable of (path, node) for items in this manifest."""

    @abc.abstractmethod
    def iteritems(self) -> Iterator[tuple[bytes, bytes]]:
        """Identical to items()."""

    @abc.abstractmethod
    def iterentries(self) -> Iterator[tuple[bytes, bytes, bytes]]:
        """Returns an iterable of (path, node, flags) for this manifest.

        Similar to ``iteritems()`` except items are a 3-tuple and include
        flags.
        """

    @abc.abstractmethod
    def text(self) -> ByteString:
        """Obtain the raw data representation for this manifest.

        Result is used to create a manifest revision.
        """

    @abc.abstractmethod
    def fastdelta(
        self, base: ByteString, changes: Iterable[tuple[bytes, bool]]
    ) -> tuple[ByteString, ByteString]:
        """Obtain a delta between this manifest and another given changes.

        ``base`` is the raw data representation for another manifest.

        ``changes`` is an iterable of ``(path, to_delete)``.

        Returns a 2-tuple containing ``bytearray(self.text())`` and the
        delta between ``base`` and this manifest.

        If this manifest implementation can't support ``fastdelta()``,
        raise ``mercurial.manifest.FastdeltaUnavailable``.
        """


class imanifestrevisionbase(Protocol):
    """Base interface representing a single revision of a manifest.

    Should not be used as a primary interface: should always be inherited
    as part of a larger interface.
    """

    def copy(self):
        """Obtain a copy of this manifest instance.

        Returns an object conforming to the ``imanifestrevisionwritable``
        interface. The instance will be associated with the same
        ``imanifestlog`` collection as this instance.
        """

    def read(self):
        """Obtain the parsed manifest data structure.

        The returned object conforms to the ``imanifestdict`` interface.
        """


class imanifestrevisionstored(imanifestrevisionbase, Protocol):
    """Interface representing a manifest revision committed to storage."""

    @abc.abstractmethod
    def node(self) -> bytes:
        """The binary node for this manifest."""

    parents: list[bytes]
    """List of binary nodes that are parents for this manifest revision."""

    @abc.abstractmethod
    def readdelta(self, shallow: bool = False):
        """Obtain the manifest data structure representing changes from parent.

        This manifest is compared to its 1st parent. A new manifest
        representing those differences is constructed.

        If `shallow` is True, this will read the delta for this directory,
        without recursively reading subdirectory manifests. Instead, any
        subdirectory entry will be reported as it appears in the manifest, i.e.
        the subdirectory will be reported among files and distinguished only by
        its 't' flag. This only applies if the underlying manifest supports it.

        The returned object conforms to the ``imanifestdict`` interface.
        """

    @abc.abstractmethod
    def read_any_fast_delta(
        self,
        valid_bases: Collection[int] | None = None,
        *,
        shallow: bool = False,
    ):
        """Read some manifest information as fast as possible.

        This might return a "delta", a manifest object containing only the
        files changed compared to another revision. The `valid_bases`
        argument controls the set of revisions that might be used as a base.

        If no delta can be retrieved quickly, a full read of the manifest will
        be performed instead.

        The function returns a tuple with two elements. The first one is the
        delta base used (or None if we did a full read), the second one is the
        manifest information.

        If `shallow` is True, this will read the delta for this directory,
        without recursively reading subdirectory manifests. Instead, any
        subdirectory entry will be reported as it appears in the manifest, i.e.
        the subdirectory will be reported among files and distinguished only by
        its 't' flag. This only applies if the underlying manifest supports it.

        The returned object conforms to the ``imanifestdict`` interface.
        """

    @abc.abstractmethod
    def read_delta_parents(self, *, shallow: bool = False, exact: bool = True):
        """Return a diff from this revision against both parents.

        If `exact` is False, this might return a superset of the diff,
        containing files that are actually present as is in one of the
        parents.

        If `shallow` is True, this will read the delta for this directory,
        without recursively reading subdirectory manifests. Instead, any
        subdirectory entry will be reported as it appears in the manifest, i.e.
        the subdirectory will be reported among files and distinguished only by
        its 't' flag. This only applies if the underlying manifest supports it.

        The returned object conforms to the ``imanifestdict`` interface."""

    @abc.abstractmethod
    def read_delta_new_entries(self, *, shallow: bool = False):
        """Return a manifest containing just the entries that might be new to
        the repository.

        This is often equivalent to a diff against both parents, but without
        guarantee. For performance reasons, it might contain more files in
        some cases.

        If `shallow` is True, this will read the delta for this directory,
        without recursively reading subdirectory manifests. Instead, any
        subdirectory entry will be reported as it appears in the manifest, i.e.
        the subdirectory will be reported among files and distinguished only by
        its 't' flag. This only applies if the underlying manifest supports it.

        The returned object conforms to the ``imanifestdict`` interface."""

    @abc.abstractmethod
    def readfast(self, shallow: bool = False):
        """Calls either ``read()`` or ``readdelta()``.

        The faster of the two options is called.
        """

    @abc.abstractmethod
    def find(self, key: bytes) -> tuple[bytes, bytes]:
        """Calls ``self.read().find(key)``.

        Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
        """


class imanifestrevisionwritable(imanifestrevisionbase, Protocol):
    """Interface representing a manifest revision that can be committed."""

    @abc.abstractmethod
    def write(
        self, transaction, linkrev, p1node, p2node, added, removed, match=None
    ):
        """Add this revision to storage.

        Takes a transaction object, the changeset revision number it will
        be associated with, its parent nodes, and lists of added and
        removed paths.

        If match is provided, storage can choose not to inspect or write out
        items that do not match. Storage is still required to be able to provide
        the full manifest in the future for any directories written (these
        manifests should not be "narrowed on disk").

        Returns the binary node of the created revision.
        """


class imanifeststorage(Protocol):
    """Storage interface for manifest data."""

    nodeconstants: NodeConstants
    """nodeconstants used by the current repository."""

    tree: bytes
    """The path to the directory this manifest tracks.

    The empty bytestring represents the root manifest.
    """

    index: ifilerevisionssequence
    """An ``ifilerevisionssequence`` instance."""

    opener: Vfs
    """VFS opener to use to access underlying files used for storage.

    TODO this is revlog specific and should not be exposed.
    """

    # TODO: finish type hints
    fulltextcache: dict
    """Dict with cache of fulltexts.

    TODO this doesn't feel appropriate for the storage interface.
    """

    @abc.abstractmethod
    def __len__(self):
        """Obtain the number of revisions stored for this manifest."""

    @abc.abstractmethod
    def __iter__(self):
        """Iterate over revision numbers for this manifest."""

    @abc.abstractmethod
    def rev(self, node):
        """Obtain the revision number given a binary node.

        Raises ``error.LookupError`` if the node is not known.
        """

    @abc.abstractmethod
    def node(self, rev):
        """Obtain the node value given a revision number.

        Raises ``error.LookupError`` if the revision is not known.
        """

    @abc.abstractmethod
    def lookup(self, value):
        """Attempt to resolve a value to a node.

        Value can be a binary node, hex node, revision number, or a bytes
        that can be converted to an integer.

        Raises ``error.LookupError`` if a node could not be resolved.
        """

    @abc.abstractmethod
    def parents(self, node):
        """Returns a 2-tuple of parent nodes for a node.

        Values will be ``nullid`` if the parent is empty.
        """

    @abc.abstractmethod
    def parentrevs(self, rev):
        """Like parents() but operates on revision numbers."""

    @abc.abstractmethod
    def linkrev(self, rev):
        """Obtain the changeset revision number a revision is linked to."""

    @abc.abstractmethod
    def revision(self, node):
        """Obtain fulltext data for a node."""

    @abc.abstractmethod
    def rawdata(self, node):
        """Obtain raw data for a node."""

    @abc.abstractmethod
    def revdiff(self, rev1, rev2):
        """Obtain a delta between two revision numbers.

        The returned data is the result of ``bdiff.bdiff()`` on the raw
        revision data.
        """

    @abc.abstractmethod
    def cmp(self, node, fulltext):
        """Compare fulltext to another revision.

        Returns True if the fulltext is different from what is stored.
        """

    @abc.abstractmethod
    def emitrevisions(
        self,
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
    ):
        """Produce ``irevisiondelta`` describing revisions.

        See the documentation for ``ifiledata`` for more.
        """

    @abc.abstractmethod
    def addgroup(
        self,
        deltas,
        linkmapper,
        transaction,
        addrevisioncb=None,
        duplicaterevisioncb=None,
    ):
        """Process a series of deltas for storage.

        See the documentation in ``ifilemutation`` for more.
        """

    @abc.abstractmethod
    def rawsize(self, rev):
        """Obtain the size of tracked data.

        Is equivalent to ``len(m.rawdata(node))``.

        TODO this method is only used by upgrade code and may be removed.
        """

    @abc.abstractmethod
    def getstrippoint(self, minlink):
        """Find minimum revision that must be stripped to strip a linkrev.

        See the documentation in ``ifilemutation`` for more.
        """

    @abc.abstractmethod
    def strip(self, minlink, transaction):
        """Remove storage of items starting at a linkrev.

        See the documentation in ``ifilemutation`` for more.
        """

    @abc.abstractmethod
    def checksize(self):
        """Obtain the expected sizes of backing files.

        TODO this is used by verify and it should not be part of the interface.
        """

    @abc.abstractmethod
    def files(self):
        """Obtain paths that are backing storage for this manifest.

        TODO this is used by verify and there should probably be a better API
        for this functionality.
        """

    @abc.abstractmethod
    def deltaparent(self, rev):
        """Obtain the revision that a revision is delta'd against.

        TODO delta encoding is an implementation detail of storage and should
        not be exposed to the storage interface.
        """

    @abc.abstractmethod
    def clone(self, tr, dest, **kwargs):
        """Clone this instance to another."""

    @abc.abstractmethod
    def clearcaches(self, clear_persisted_data=False):
        """Clear any caches associated with this instance."""

    @abc.abstractmethod
    def dirlog(self, d):
        """Obtain a manifest storage instance for a tree."""

    @abc.abstractmethod
    def add(
        self,
        m,
        transaction,
        link,
        p1,
        p2,
        added,
        removed,
        readtree=None,
        match=None,
    ):
        """Add a revision to storage.

        ``m`` is an object conforming to ``imanifestdict``.

        ``link`` is the linkrev revision number.

        ``p1`` and ``p2`` are the parent revision numbers.

        ``added`` and ``removed`` are iterables of added and removed paths,
        respectively.

        ``readtree`` is a function that can be used to read the child tree(s)
        when recursively writing the full tree structure when using
        treemanifests.

        ``match`` is a matcher that can be used to hint to storage that not all
        paths must be inspected; this is an optimization and can be safely
        ignored. Note that the storage must still be able to reproduce a full
        manifest including files that did not match.
        """

    @abc.abstractmethod
    def storageinfo(
        self,
        exclusivefiles=False,
        sharedfiles=False,
        revisionscount=False,
        trackedsize=False,
        storedsize=False,
    ):
        """Obtain information about storage for this manifest's data.

        See ``ifilestorage.storageinfo()`` for a description of this method.
        This one behaves the same way, except for manifest data.
        """

    @abc.abstractmethod
    def get_revlog(self):
        """Return an actual revlog instance, if any.

        This exists because a lot of code leverages the fact that the
        underlying storage is a revlog for optimization, so giving a simple
        way to access the revlog instance helps such code.
        """


class imanifestlog(Protocol):
    """Interface representing a collection of manifest snapshots.

    Represents the root manifest in a repository.

    Also serves as a means to access nested tree manifests and to cache
    tree manifests.
    """

    nodeconstants: NodeConstants
    """nodeconstants used by the current repository."""

    narrowed: bool
    """True if the manifest is narrowed by a matcher."""

    @abc.abstractmethod
    def __getitem__(self, node):
        """Obtain a manifest instance for a given binary node.

        Equivalent to calling ``self.get('', node)``.

        The returned object conforms to the ``imanifestrevisionstored``
        interface.
        """

    @abc.abstractmethod
    def get(self, tree, node, verify=True):
        """Retrieve the manifest instance for a given directory and binary node.

        ``node`` always refers to the node of the root manifest (which will be
        the only manifest if flat manifests are being used).

        If ``tree`` is the empty string, the root manifest is returned.
        Otherwise the manifest for the specified directory will be returned
        (requires tree manifests).

        If ``verify`` is True, ``LookupError`` is raised if the node is not
        known.

        The returned object conforms to the ``imanifestrevisionstored``
        interface.
        """

    @abc.abstractmethod
    def getstorage(self, tree):
        """Retrieve an interface to storage for a particular tree.

        If ``tree`` is the empty bytestring, storage for the root manifest will
        be returned. Otherwise storage for a tree manifest is returned.

        TODO formalize interface for returned object.
        """

    @abc.abstractmethod
    def clearcaches(self, clear_persisted_data: bool = False) -> None:
        """Clear caches associated with this collection."""

    @abc.abstractmethod
    def rev(self, node):
        """Obtain the revision number for a binary node.

        Raises ``error.LookupError`` if the node is not known.
        """

    @abc.abstractmethod
    def update_caches(self, transaction):
        """Update whatever caches are relevant for the used storage."""


class ilocalrepositoryfilestorage(Protocol):
    """Local repository sub-interface providing access to tracked file storage.

    This interface defines how a repository accesses storage for a single
    tracked file path.
    """

    @abc.abstractmethod
    def file(self, f):
        """Obtain a filelog for a tracked path.

        The returned type conforms to the ``ifilestorage`` interface.
        """
1709
1712
1710
1713
1711 class ilocalrepositorymain(Protocol):
1714 class ilocalrepositorymain(Protocol):
1712 """Main interface for local repositories.
1715 """Main interface for local repositories.
1713
1716
1714 This currently captures the reality of things - not how things should be.
1717 This currently captures the reality of things - not how things should be.
1715 """
1718 """
1716
1719
1717 nodeconstants: NodeConstants
1720 nodeconstants: NodeConstants
1718 """Constant nodes matching the hash function used by the repository."""
1721 """Constant nodes matching the hash function used by the repository."""
1719
1722
1720 nullid: bytes
1723 nullid: bytes
1721 """null revision for the hash function used by the repository."""
1724 """null revision for the hash function used by the repository."""
1722
1725
1723 supported: set[bytes]
1726 supported: set[bytes]
1724 """Set of requirements that this repo is capable of opening."""
1727 """Set of requirements that this repo is capable of opening."""
1725
1728
1726 requirements: set[bytes]
1729 requirements: set[bytes]
1727 """Set of requirements this repo uses."""
1730 """Set of requirements this repo uses."""
1728
1731
1729 features: set[bytes]
1732 features: set[bytes]
1730 """Set of "features" this repository supports.
1733 """Set of "features" this repository supports.
1731
1734
1732 A "feature" is a loosely-defined term. It can refer to a feature
1735 A "feature" is a loosely-defined term. It can refer to a feature
1733 in the classical sense or can describe an implementation detail
1736 in the classical sense or can describe an implementation detail
1734 of the repository. For example, a ``readonly`` feature may denote
1737 of the repository. For example, a ``readonly`` feature may denote
1735 the repository as read-only. Or a ``revlogfilestore`` feature may
1738 the repository as read-only. Or a ``revlogfilestore`` feature may
1736 denote that the repository is using revlogs for file storage.
1739 denote that the repository is using revlogs for file storage.
1737
1740
1738 The intent of features is to provide a machine-queryable mechanism
1741 The intent of features is to provide a machine-queryable mechanism
1739 for repo consumers to test for various repository characteristics.
1742 for repo consumers to test for various repository characteristics.
1740
1743
1741 Features are similar to ``requirements``. The main difference is that
1744 Features are similar to ``requirements``. The main difference is that
1742 requirements are stored on-disk and represent requirements to open the
1745 requirements are stored on-disk and represent requirements to open the
1743 repository. Features are more run-time capabilities of the repository
1746 repository. Features are more run-time capabilities of the repository
1744 and more granular capabilities (which may be derived from requirements).
1747 and more granular capabilities (which may be derived from requirements).
1745 """
1748 """
1746
1749
1747 filtername: bytes
1750 filtername: bytes
1748 """Name of the repoview that is active on this repo."""
1751 """Name of the repoview that is active on this repo."""
1749
1752
1750 vfs_map: Mapping[bytes, Vfs]
1753 vfs_map: Mapping[bytes, Vfs]
1751 """a bytes-key β†’ vfs mapping used by transaction and others"""
1754 """a bytes-key β†’ vfs mapping used by transaction and others"""
1752
1755
1753 wvfs: Vfs
1756 wvfs: Vfs
1754 """VFS used to access the working directory."""
1757 """VFS used to access the working directory."""
1755
1758
1756 vfs: Vfs
1759 vfs: Vfs
1757 """VFS rooted at the .hg directory.
1760 """VFS rooted at the .hg directory.
1758
1761
1759 Used to access repository data not in the store.
1762 Used to access repository data not in the store.
1760 """
1763 """
1761
1764
1762 svfs: Vfs
1765 svfs: Vfs
1763 """VFS rooted at the store.
1766 """VFS rooted at the store.
1764
1767
1765 Used to access repository data in the store. Typically .hg/store.
1768 Used to access repository data in the store. Typically .hg/store.
1766 But can point elsewhere if the store is shared.
1769 But can point elsewhere if the store is shared.
1767 """
1770 """
1768
1771
1769 root: bytes
1772 root: bytes
1770 """Path to the root of the working directory."""
1773 """Path to the root of the working directory."""
1771
1774
1772 path: bytes
1775 path: bytes
1773 """Path to the .hg directory."""
1776 """Path to the .hg directory."""
1774
1777
1775 origroot: bytes
1778 origroot: bytes
1776 """The filesystem path that was used to construct the repo."""
1779 """The filesystem path that was used to construct the repo."""
1777
1780
1778 auditor: Any
1781 auditor: Any
1779 """A pathauditor for the working directory.
1782 """A pathauditor for the working directory.
1780
1783
1781 This checks if a path refers to a nested repository.
1784 This checks if a path refers to a nested repository.
1782
1785
1783 Operates on the filesystem.
1786 Operates on the filesystem.
1784 """
1787 """
1785
1788
1786 nofsauditor: Any # TODO: add type hints
1789 nofsauditor: Any # TODO: add type hints
1787 """A pathauditor for the working directory.
1790 """A pathauditor for the working directory.
1788
1791
1789 This is like ``auditor`` except it doesn't do filesystem checks.
1792 This is like ``auditor`` except it doesn't do filesystem checks.
1790 """
1793 """
1791
1794
1792 baseui: Ui
1795 baseui: Ui
1793 """Original ui instance passed into constructor."""
1796 """Original ui instance passed into constructor."""
1794
1797
1795 ui: Ui
1798 ui: Ui
1796 """Main ui instance for this instance."""
1799 """Main ui instance for this instance."""
1797
1800
1798 sharedpath: bytes
1801 sharedpath: bytes
1799 """Path to the .hg directory of the repo this repo was shared from."""
1802 """Path to the .hg directory of the repo this repo was shared from."""
1800
1803
1801 store: Any # TODO: add type hints
1804 store: Any # TODO: add type hints
1802 """A store instance."""
1805 """A store instance."""
1803
1806
1804 spath: bytes
1807 spath: bytes
1805 """Path to the store."""
1808 """Path to the store."""
1806
1809
1807 sjoin: Callable # TODO: add type hints
1810 sjoin: Callable # TODO: add type hints
1808 """Alias to self.store.join."""
1811 """Alias to self.store.join."""
1809
1812
1810 cachevfs: Vfs
1813 cachevfs: Vfs
1811 """A VFS used to access the cache directory.
1814 """A VFS used to access the cache directory.
1812
1815
1813 Typically .hg/cache.
1816 Typically .hg/cache.
1814 """
1817 """
1815
1818
1816 wcachevfs: Vfs
1819 wcachevfs: Vfs
1817 """A VFS used to access the cache directory dedicated to working copy
1820 """A VFS used to access the cache directory dedicated to working copy
1818
1821
1819 Typically .hg/wcache.
1822 Typically .hg/wcache.
1820 """
1823 """
1821
1824
1822 filteredrevcache: Any # TODO: add type hints
1825 filteredrevcache: Any # TODO: add type hints
1823 """Holds sets of revisions to be filtered."""
1826 """Holds sets of revisions to be filtered."""
1824
1827
1825 names: Any # TODO: add type hints
1828 names: Any # TODO: add type hints
1826 """A ``namespaces`` instance."""
1829 """A ``namespaces`` instance."""
1827
1830
1828 filecopiesmode: Any # TODO: add type hints
1831 filecopiesmode: Any # TODO: add type hints
1829 """The way files copies should be dealt with in this repo."""
1832 """The way files copies should be dealt with in this repo."""
1830
1833
1831 @abc.abstractmethod
1834 @abc.abstractmethod
1832 def close(self):
1835 def close(self):
1833 """Close the handle on this repository."""
1836 """Close the handle on this repository."""
1834
1837
1835 @abc.abstractmethod
1838 @abc.abstractmethod
1836 def peer(self, path=None):
1839 def peer(self, path=None):
1837 """Obtain an object conforming to the ``peer`` interface."""
1840 """Obtain an object conforming to the ``peer`` interface."""
1838
1841
1839 @abc.abstractmethod
1842 @abc.abstractmethod
1840 def unfiltered(self):
1843 def unfiltered(self):
1841 """Obtain an unfiltered/raw view of this repo."""
1844 """Obtain an unfiltered/raw view of this repo."""
1842
1845
1843 @abc.abstractmethod
1846 @abc.abstractmethod
1844 def filtered(self, name, visibilityexceptions=None):
1847 def filtered(self, name, visibilityexceptions=None):
1845 """Obtain a named view of this repository."""
1848 """Obtain a named view of this repository."""
1846
1849
1847 obsstore: Any # TODO: add type hints
1850 obsstore: Any # TODO: add type hints
1848 """A store of obsolescence data."""
1851 """A store of obsolescence data."""
1849
1852
1850 changelog: Any # TODO: add type hints
1853 changelog: Any # TODO: add type hints
1851 """A handle on the changelog revlog."""
1854 """A handle on the changelog revlog."""
1852
1855
1853 manifestlog: imanifestlog
1856 manifestlog: imanifestlog
1854 """An instance conforming to the ``imanifestlog`` interface.
1857 """An instance conforming to the ``imanifestlog`` interface.
1855
1858
1856 Provides access to manifests for the repository.
1859 Provides access to manifests for the repository.
1857 """
1860 """
1858
1861
1859 dirstate: intdirstate.idirstate
1862 dirstate: intdirstate.idirstate
1860 """Working directory state."""
1863 """Working directory state."""
1861
1864
1862 narrowpats: Any # TODO: add type hints
1865 narrowpats: Any # TODO: add type hints
1863 """Matcher patterns for this repository's narrowspec."""
1866 """Matcher patterns for this repository's narrowspec."""
1864
1867
1865 @abc.abstractmethod
1868 @abc.abstractmethod
1866 def narrowmatch(self, match=None, includeexact=False):
1869 def narrowmatch(self, match=None, includeexact=False):
1867 """Obtain a matcher for the narrowspec."""
1870 """Obtain a matcher for the narrowspec."""
1868
1871
1869 @abc.abstractmethod
1872 @abc.abstractmethod
1870 def setnarrowpats(self, newincludes, newexcludes):
1873 def setnarrowpats(self, newincludes, newexcludes):
1871 """Define the narrowspec for this repository."""
1874 """Define the narrowspec for this repository."""
1872
1875
1873 @abc.abstractmethod
1876 @abc.abstractmethod
1874 def __getitem__(self, changeid):
1877 def __getitem__(self, changeid):
1875 """Try to resolve a changectx."""
1878 """Try to resolve a changectx."""
1876
1879
1877 @abc.abstractmethod
1880 @abc.abstractmethod
1878 def __contains__(self, changeid):
1881 def __contains__(self, changeid):
1879 """Whether a changeset exists."""
1882 """Whether a changeset exists."""
1880
1883
1881 @abc.abstractmethod
1884 @abc.abstractmethod
1882 def __nonzero__(self):
1885 def __nonzero__(self):
1883 """Always returns True."""
1886 """Always returns True."""
1884 return True
1887 return True
1885
1888
1886 __bool__ = __nonzero__
1889 __bool__ = __nonzero__
1887
1890
1888 @abc.abstractmethod
1891 @abc.abstractmethod
1889 def __len__(self):
1892 def __len__(self):
1890 """Returns the number of changesets in the repo."""
1893 """Returns the number of changesets in the repo."""
1891
1894
1892 @abc.abstractmethod
1895 @abc.abstractmethod
1893 def __iter__(self):
1896 def __iter__(self):
1894 """Iterate over revisions in the changelog."""
1897 """Iterate over revisions in the changelog."""
1895
1898
1896 @abc.abstractmethod
1899 @abc.abstractmethod
1897 def revs(self, expr, *args):
1900 def revs(self, expr, *args):
1898 """Evaluate a revset.
1901 """Evaluate a revset.
1899
1902
1900 Emits revisions.
1903 Emits revisions.
1901 """
1904 """
1902
1905
1903 @abc.abstractmethod
1906 @abc.abstractmethod
1904 def set(self, expr, *args):
1907 def set(self, expr, *args):
1905 """Evaluate a revset.
1908 """Evaluate a revset.
1906
1909
1907 Emits changectx instances.
1910 Emits changectx instances.
1908 """
1911 """
1909
1912
1910 @abc.abstractmethod
1913 @abc.abstractmethod
1911 def anyrevs(self, specs, user=False, localalias=None):
1914 def anyrevs(self, specs, user=False, localalias=None):
1912 """Find revisions matching one of the given revsets."""
1915 """Find revisions matching one of the given revsets."""
1913
1916
1914 @abc.abstractmethod
1917 @abc.abstractmethod
1915 def url(self):
1918 def url(self):
1916 """Returns a string representing the location of this repo."""
1919 """Returns a string representing the location of this repo."""
1917
1920
1918 @abc.abstractmethod
1921 @abc.abstractmethod
1919 def hook(self, name, throw=False, **args):
1922 def hook(self, name, throw=False, **args):
1920 """Call a hook."""
1923 """Call a hook."""
1921
1924
1922 @abc.abstractmethod
1925 @abc.abstractmethod
1923 def tags(self):
1926 def tags(self):
1924 """Return a mapping of tag to node."""
1927 """Return a mapping of tag to node."""
1925
1928
1926 @abc.abstractmethod
1929 @abc.abstractmethod
1927 def tagtype(self, tagname):
1930 def tagtype(self, tagname):
1928 """Return the type of a given tag."""
1931 """Return the type of a given tag."""
1929
1932
1930 @abc.abstractmethod
1933 @abc.abstractmethod
1931 def tagslist(self):
1934 def tagslist(self):
1932 """Return a list of tags ordered by revision."""
1935 """Return a list of tags ordered by revision."""
1933
1936
1934 @abc.abstractmethod
1937 @abc.abstractmethod
1935 def nodetags(self, node):
1938 def nodetags(self, node):
1936 """Return the tags associated with a node."""
1939 """Return the tags associated with a node."""
1937
1940
1938 @abc.abstractmethod
1941 @abc.abstractmethod
1939 def nodebookmarks(self, node):
1942 def nodebookmarks(self, node):
1940 """Return the list of bookmarks pointing to the specified node."""
1943 """Return the list of bookmarks pointing to the specified node."""
1941
1944
1942 @abc.abstractmethod
1945 @abc.abstractmethod
1943 def branchmap(self):
1946 def branchmap(self):
1944 """Return a mapping of branch to heads in that branch."""
1947 """Return a mapping of branch to heads in that branch."""
1945
1948
1946 @abc.abstractmethod
1949 @abc.abstractmethod
1947 def revbranchcache(self):
1950 def revbranchcache(self):
1948 pass
1951 pass
1949
1952
1950 @abc.abstractmethod
1953 @abc.abstractmethod
1951 def register_changeset(self, rev, changelogrevision):
1954 def register_changeset(self, rev, changelogrevision):
1952 """Extension point for caches for new nodes.
1955 """Extension point for caches for new nodes.
1953
1956
1954 Multiple consumers are expected to need parts of the changelogrevision,
1957 Multiple consumers are expected to need parts of the changelogrevision,
1955 so it is provided as an optimization to avoid duplicate lookups. A simple
1958 so it is provided as an optimization to avoid duplicate lookups. A simple
1956 cache would be fragile when other revisions are accessed, too."""
1959 cache would be fragile when other revisions are accessed, too."""
1957 pass
1960 pass
1958
1961
1959 @abc.abstractmethod
1962 @abc.abstractmethod
1960 def branchtip(self, branch, ignoremissing=False):
1963 def branchtip(self, branch, ignoremissing=False):
1961 """Return the tip node for a given branch."""
1964 """Return the tip node for a given branch."""
1962
1965
1963 @abc.abstractmethod
1966 @abc.abstractmethod
1964 def lookup(self, key):
1967 def lookup(self, key):
1965 """Resolve the node for a revision."""
1968 """Resolve the node for a revision."""
1966
1969
1967 @abc.abstractmethod
1970 @abc.abstractmethod
1968 def lookupbranch(self, key):
1971 def lookupbranch(self, key):
1969 """Look up the branch name of the given revision or branch name."""
1972 """Look up the branch name of the given revision or branch name."""
1970
1973
1971 @abc.abstractmethod
1974 @abc.abstractmethod
1972 def known(self, nodes):
1975 def known(self, nodes):
1973 """Determine whether a series of nodes is known.
1976 """Determine whether a series of nodes is known.
1974
1977
1975 Returns a list of bools.
1978 Returns a list of bools.
1976 """
1979 """
1977
1980
1978 @abc.abstractmethod
1981 @abc.abstractmethod
1979 def local(self):
1982 def local(self):
1980 """Whether the repository is local."""
1983 """Whether the repository is local."""
1981 return True
1984 return True
1982
1985
1983 @abc.abstractmethod
1986 @abc.abstractmethod
1984 def publishing(self):
1987 def publishing(self):
1985 """Whether the repository is a publishing repository."""
1988 """Whether the repository is a publishing repository."""
1986
1989
1987 @abc.abstractmethod
1990 @abc.abstractmethod
1988 def cancopy(self):
1991 def cancopy(self):
1989 pass
1992 pass
1990
1993
1991 @abc.abstractmethod
1994 @abc.abstractmethod
1992 def shared(self):
1995 def shared(self):
1993 """The type of shared repository or None."""
1996 """The type of shared repository or None."""
1994
1997
1995 @abc.abstractmethod
1998 @abc.abstractmethod
1996 def wjoin(self, f, *insidef):
1999 def wjoin(self, f, *insidef):
1997 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
2000 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
1998
2001
1999 @abc.abstractmethod
2002 @abc.abstractmethod
2000 def setparents(self, p1, p2):
2003 def setparents(self, p1, p2):
2001 """Set the parent nodes of the working directory."""
2004 """Set the parent nodes of the working directory."""
2002
2005
2003 @abc.abstractmethod
2006 @abc.abstractmethod
2004 def filectx(self, path, changeid=None, fileid=None):
2007 def filectx(self, path, changeid=None, fileid=None):
2005 """Obtain a filectx for the given file revision."""
2008 """Obtain a filectx for the given file revision."""
2006
2009
2007 @abc.abstractmethod
2010 @abc.abstractmethod
2008 def getcwd(self):
2011 def getcwd(self):
2009 """Obtain the current working directory from the dirstate."""
2012 """Obtain the current working directory from the dirstate."""
2010
2013
2011 @abc.abstractmethod
2014 @abc.abstractmethod
2012 def pathto(self, f, cwd=None):
2015 def pathto(self, f, cwd=None):
2013 """Obtain the relative path to a file."""
2016 """Obtain the relative path to a file."""
2014
2017
2015 @abc.abstractmethod
2018 @abc.abstractmethod
2016 def adddatafilter(self, name, fltr):
2019 def adddatafilter(self, name, fltr):
2017 pass
2020 pass
2018
2021
2019 @abc.abstractmethod
2022 @abc.abstractmethod
2020 def wread(self, filename):
2023 def wread(self, filename):
2021 """Read a file from wvfs, using data filters."""
2024 """Read a file from wvfs, using data filters."""
2022
2025
2023 @abc.abstractmethod
2026 @abc.abstractmethod
2024 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2027 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2025 """Write data to a file in the wvfs, using data filters."""
2028 """Write data to a file in the wvfs, using data filters."""
2026
2029
2027 @abc.abstractmethod
2030 @abc.abstractmethod
2028 def wwritedata(self, filename, data):
2031 def wwritedata(self, filename, data):
2029 """Resolve data for writing to the wvfs, using data filters."""
2032 """Resolve data for writing to the wvfs, using data filters."""
2030
2033
2031 @abc.abstractmethod
2034 @abc.abstractmethod
2032 def currenttransaction(self):
2035 def currenttransaction(self):
2033 """Obtain the current transaction instance or None."""
2036 """Obtain the current transaction instance or None."""
2034
2037
2035 @abc.abstractmethod
2038 @abc.abstractmethod
2036 def transaction(self, desc, report=None):
2039 def transaction(self, desc, report=None):
2037 """Open a new transaction to write to the repository."""
2040 """Open a new transaction to write to the repository."""
2038
2041
2039 @abc.abstractmethod
2042 @abc.abstractmethod
2040 def undofiles(self):
2043 def undofiles(self):
2041 """Returns a list of (vfs, path) for files to undo transactions."""
2044 """Returns a list of (vfs, path) for files to undo transactions."""
2042
2045
2043 @abc.abstractmethod
2046 @abc.abstractmethod
2044 def recover(self):
2047 def recover(self):
2045 """Roll back an interrupted transaction."""
2048 """Roll back an interrupted transaction."""
2046
2049
2047 @abc.abstractmethod
2050 @abc.abstractmethod
2048 def rollback(self, dryrun=False, force=False):
2051 def rollback(self, dryrun=False, force=False):
2049 """Undo the last transaction.
2052 """Undo the last transaction.
2050
2053
2051 DANGEROUS.
2054 DANGEROUS.
2052 """
2055 """
2053
2056
2054 @abc.abstractmethod
2057 @abc.abstractmethod
2055 def updatecaches(self, tr=None, full=False, caches=None):
2058 def updatecaches(self, tr=None, full=False, caches=None):
2056 """Warm repo caches."""
2059 """Warm repo caches."""
2057
2060
2058 @abc.abstractmethod
2061 @abc.abstractmethod
2059 def invalidatecaches(self):
2062 def invalidatecaches(self):
2060 """Invalidate cached data due to the repository mutating."""
2063 """Invalidate cached data due to the repository mutating."""
2061
2064
2062 @abc.abstractmethod
2065 @abc.abstractmethod
2063 def invalidatevolatilesets(self):
2066 def invalidatevolatilesets(self):
2064 pass
2067 pass
2065
2068
2066 @abc.abstractmethod
2069 @abc.abstractmethod
2067 def invalidatedirstate(self):
2070 def invalidatedirstate(self):
2068 """Invalidate the dirstate."""
2071 """Invalidate the dirstate."""
2069
2072
2070 @abc.abstractmethod
2073 @abc.abstractmethod
2071 def invalidate(self, clearfilecache=False):
2074 def invalidate(self, clearfilecache=False):
2072 pass
2075 pass
2073
2076
2074 @abc.abstractmethod
2077 @abc.abstractmethod
2075 def invalidateall(self):
2078 def invalidateall(self):
2076 pass
2079 pass
2077
2080
2078 @abc.abstractmethod
2081 @abc.abstractmethod
2079 def lock(self, wait=True):
2082 def lock(self, wait=True):
2080 """Lock the repository store and return a lock instance."""
2083 """Lock the repository store and return a lock instance."""
2081
2084
2082 @abc.abstractmethod
2085 @abc.abstractmethod
2083 def currentlock(self):
2086 def currentlock(self):
2084 """Return the lock if it's held or None."""
2087 """Return the lock if it's held or None."""
2085
2088
2086 @abc.abstractmethod
2089 @abc.abstractmethod
2087 def wlock(self, wait=True):
2090 def wlock(self, wait=True):
2088 """Lock the non-store parts of the repository."""
2091 """Lock the non-store parts of the repository."""
2089
2092
2090 @abc.abstractmethod
2093 @abc.abstractmethod
2091 def currentwlock(self):
2094 def currentwlock(self):
2092 """Return the wlock if it's held or None."""
2095 """Return the wlock if it's held or None."""
2093
2096
2094 @abc.abstractmethod
2097 @abc.abstractmethod
2095 def checkcommitpatterns(self, wctx, match, status, fail):
2098 def checkcommitpatterns(self, wctx, match, status, fail):
2096 pass
2099 pass
2097
2100
2098 @abc.abstractmethod
2101 @abc.abstractmethod
2099 def commit(
2102 def commit(
2100 self,
2103 self,
2101 text=b'',
2104 text=b'',
2102 user=None,
2105 user=None,
2103 date=None,
2106 date=None,
2104 match=None,
2107 match=None,
2105 force=False,
2108 force=False,
2106 editor=False,
2109 editor=False,
2107 extra=None,
2110 extra=None,
2108 ):
2111 ):
2109 """Add a new revision to the repository."""
2112 """Add a new revision to the repository."""
2110
2113
2111 @abc.abstractmethod
2114 @abc.abstractmethod
2112 def commitctx(self, ctx, error=False, origctx=None):
2115 def commitctx(self, ctx, error=False, origctx=None):
2113 """Commit a commitctx instance to the repository."""
2116 """Commit a commitctx instance to the repository."""
2114
2117
2115 @abc.abstractmethod
2118 @abc.abstractmethod
2116 def destroying(self):
2119 def destroying(self):
2117 """Inform the repository that nodes are about to be destroyed."""
2120 """Inform the repository that nodes are about to be destroyed."""
2118
2121
2119 @abc.abstractmethod
2122 @abc.abstractmethod
2120 def destroyed(self):
2123 def destroyed(self):
2121 """Inform the repository that nodes have been destroyed."""
2124 """Inform the repository that nodes have been destroyed."""
2122
2125
2123 @abc.abstractmethod
2126 @abc.abstractmethod
2124 def status(
2127 def status(
2125 self,
2128 self,
2126 node1=b'.',
2129 node1=b'.',
2127 node2=None,
2130 node2=None,
2128 match=None,
2131 match=None,
2129 ignored=False,
2132 ignored=False,
2130 clean=False,
2133 clean=False,
2131 unknown=False,
2134 unknown=False,
2132 listsubrepos=False,
2135 listsubrepos=False,
2133 ):
2136 ):
2134 """Convenience method to call repo[x].status()."""
2137 """Convenience method to call repo[x].status()."""
2135
2138
2136 @abc.abstractmethod
2139 @abc.abstractmethod
2137 def addpostdsstatus(self, ps):
2140 def addpostdsstatus(self, ps):
2138 pass
2141 pass
2139
2142
2140 @abc.abstractmethod
2143 @abc.abstractmethod
2141 def postdsstatus(self):
2144 def postdsstatus(self):
2142 pass
2145 pass
2143
2146
2144 @abc.abstractmethod
2147 @abc.abstractmethod
2145 def clearpostdsstatus(self):
2148 def clearpostdsstatus(self):
2146 pass
2149 pass
2147
2150
2148 @abc.abstractmethod
2151 @abc.abstractmethod
2149 def heads(self, start=None):
2152 def heads(self, start=None):
2150 """Obtain list of nodes that are DAG heads."""
2153 """Obtain list of nodes that are DAG heads."""
2151
2154
2152 @abc.abstractmethod
2155 @abc.abstractmethod
2153 def branchheads(self, branch=None, start=None, closed=False):
2156 def branchheads(self, branch=None, start=None, closed=False):
2154 pass
2157 pass
2155
2158
2156 @abc.abstractmethod
2159 @abc.abstractmethod
2157 def branches(self, nodes):
2160 def branches(self, nodes):
2158 pass
2161 pass
2159
2162
2160 @abc.abstractmethod
2163 @abc.abstractmethod
2161 def between(self, pairs):
2164 def between(self, pairs):
2162 pass
2165 pass
2163
2166
2164 @abc.abstractmethod
2167 @abc.abstractmethod
2165 def checkpush(self, pushop):
2168 def checkpush(self, pushop):
2166 pass
2169 pass
2167
2170
2168 prepushoutgoinghooks: util.hooks
2171 prepushoutgoinghooks: util.hooks
2169 """util.hooks instance."""
2172 """util.hooks instance."""
2170
2173
2171 @abc.abstractmethod
2174 @abc.abstractmethod
2172 def pushkey(self, namespace, key, old, new):
2175 def pushkey(self, namespace, key, old, new):
2173 pass
2176 pass
2174
2177
2175 @abc.abstractmethod
2178 @abc.abstractmethod
2176 def listkeys(self, namespace):
2179 def listkeys(self, namespace):
2177 pass
2180 pass
2178
2181
2179 @abc.abstractmethod
2182 @abc.abstractmethod
2180 def debugwireargs(self, one, two, three=None, four=None, five=None):
2183 def debugwireargs(self, one, two, three=None, four=None, five=None):
2181 pass
2184 pass
2182
2185
2183 @abc.abstractmethod
2186 @abc.abstractmethod
2184 def savecommitmessage(self, text):
2187 def savecommitmessage(self, text):
2185 pass
2188 pass
2186
2189
2187 @abc.abstractmethod
2190 @abc.abstractmethod
2188 def register_sidedata_computer(
2191 def register_sidedata_computer(
2189 self, kind, category, keys, computer, flags, replace=False
2192 self, kind, category, keys, computer, flags, replace=False
2190 ):
2193 ):
2191 pass
2194 pass
2192
2195
2193 @abc.abstractmethod
2196 @abc.abstractmethod
2194 def register_wanted_sidedata(self, category):
2197 def register_wanted_sidedata(self, category):
2195 pass
2198 pass
2196
2199
2197
2200
2198 class completelocalrepository(
2201 class completelocalrepository(
2199 ilocalrepositorymain, ilocalrepositoryfilestorage
2202 ilocalrepositorymain, ilocalrepositoryfilestorage
2200 ):
2203 ):
2201 """Complete interface for a local repository."""
2204 """Complete interface for a local repository."""
2202
2205
2203
2206
2204 class iwireprotocolcommandcacher(Protocol):
2207 class iwireprotocolcommandcacher(Protocol):
2205 """Represents a caching backend for wire protocol commands.
2208 """Represents a caching backend for wire protocol commands.
2206
2209
2207 Wire protocol version 2 supports transparent caching of many commands.
2210 Wire protocol version 2 supports transparent caching of many commands.
2208 To leverage this caching, servers can activate objects that cache
2211 To leverage this caching, servers can activate objects that cache
2209 command responses. Objects handle both cache writing and reading.
2212 command responses. Objects handle both cache writing and reading.
2210 This interface defines how that response caching mechanism works.
2213 This interface defines how that response caching mechanism works.
2211
2214
2212 Wire protocol version 2 commands emit a series of objects that are
2215 Wire protocol version 2 commands emit a series of objects that are
2213 serialized and sent to the client. The caching layer exists between
2216 serialized and sent to the client. The caching layer exists between
2214 the invocation of the command function and the sending of its output
2217 the invocation of the command function and the sending of its output
2215 objects to an output layer.
2218 objects to an output layer.
2216
2219
2217 Instances of this interface represent a binding to a cache that
2220 Instances of this interface represent a binding to a cache that
2218 can serve a response (in place of calling a command function) and/or
2221 can serve a response (in place of calling a command function) and/or
2219 write responses to a cache for subsequent use.
2222 write responses to a cache for subsequent use.
2220
2223
2221 When a command request arrives, the following happens with regards
2224 When a command request arrives, the following happens with regards
2222 to this interface:
2225 to this interface:
2223
2226
2224 1. The server determines whether the command request is cacheable.
2227 1. The server determines whether the command request is cacheable.
2225 2. If it is, an instance of this interface is spawned.
2228 2. If it is, an instance of this interface is spawned.
2226 3. The cacher is activated in a context manager (``__enter__`` is called).
2229 3. The cacher is activated in a context manager (``__enter__`` is called).
2227 4. A cache *key* for that request is derived. This will call the
2230 4. A cache *key* for that request is derived. This will call the
2228 instance's ``adjustcachekeystate()`` method so the derivation
2231 instance's ``adjustcachekeystate()`` method so the derivation
2229 can be influenced.
2232 can be influenced.
2230 5. The cacher is informed of the derived cache key via a call to
2233 5. The cacher is informed of the derived cache key via a call to
2231 ``setcachekey()``.
2234 ``setcachekey()``.
2232 6. The cacher's ``lookup()`` method is called to test for presence of
2235 6. The cacher's ``lookup()`` method is called to test for presence of
2233 the derived key in the cache.
2236 the derived key in the cache.
2234 7. If ``lookup()`` returns a hit, that cached result is used in place
2237 7. If ``lookup()`` returns a hit, that cached result is used in place
2235 of invoking the command function. ``__exit__`` is called and the instance
2238 of invoking the command function. ``__exit__`` is called and the instance
2236 is discarded.
2239 is discarded.
2237 8. The command function is invoked.
2240 8. The command function is invoked.
2238 9. ``onobject()`` is called for each object emitted by the command
2241 9. ``onobject()`` is called for each object emitted by the command
2239 function.
2242 function.
2240 10. After the final object is seen, ``onfinished()`` is called.
2243 10. After the final object is seen, ``onfinished()`` is called.
2241 11. ``__exit__`` is called to signal the end of use of the instance.
2244 11. ``__exit__`` is called to signal the end of use of the instance.
2242
2245
2243 Cache *key* derivation can be influenced by the instance.
2246 Cache *key* derivation can be influenced by the instance.
2244
2247
2245 Cache keys are initially derived by a deterministic representation of
2248 Cache keys are initially derived by a deterministic representation of
2246 the command request. This includes the command name, arguments, protocol
2249 the command request. This includes the command name, arguments, protocol
2247 version, etc. This initial key derivation is performed by CBOR-encoding a
2250 version, etc. This initial key derivation is performed by CBOR-encoding a
2248 data structure and feeding that output into a hasher.
2251 data structure and feeding that output into a hasher.
2249
2252
2250 Instances of this interface can influence this initial key derivation
2253 Instances of this interface can influence this initial key derivation
2251 via ``adjustcachekeystate()``.
2254 via ``adjustcachekeystate()``.
2252
2255
2253 The instance is informed of the derived cache key via a call to
2256 The instance is informed of the derived cache key via a call to
2254 ``setcachekey()``. The instance must store the key locally so it can
2257 ``setcachekey()``. The instance must store the key locally so it can
2255 be consulted on subsequent operations that may require it.
2258 be consulted on subsequent operations that may require it.
2256
2259
2257 When constructed, the instance has access to a callable that can be used
2260 When constructed, the instance has access to a callable that can be used
2258 for encoding response objects. This callable receives as its single
2261 for encoding response objects. This callable receives as its single
2259 argument an object emitted by a command function. It returns an iterable
2262 argument an object emitted by a command function. It returns an iterable
2260 of bytes chunks representing the encoded object. Unless the cacher is
2263 of bytes chunks representing the encoded object. Unless the cacher is
2261 caching native Python objects in memory or has a way of reconstructing
2264 caching native Python objects in memory or has a way of reconstructing
2262 the original Python objects, implementations typically call this function
2265 the original Python objects, implementations typically call this function
2263 to produce bytes from the output objects and then store those bytes in
2266 to produce bytes from the output objects and then store those bytes in
2264 the cache. When it comes time to re-emit those bytes, they are wrapped
2267 the cache. When it comes time to re-emit those bytes, they are wrapped
2265 in a ``wireprototypes.encodedresponse`` instance to tell the output
2268 in a ``wireprototypes.encodedresponse`` instance to tell the output
2266 layer that they are pre-encoded.
2269 layer that they are pre-encoded.
2267
2270
2268 When receiving the objects emitted by the command function, instances
2271 When receiving the objects emitted by the command function, instances
2269 can choose what to do with those objects. The simplest thing to do is
2272 can choose what to do with those objects. The simplest thing to do is
2270 re-emit the original objects. They will be forwarded to the output
2273 re-emit the original objects. They will be forwarded to the output
2271 layer and will be processed as if the cacher did not exist.
2274 layer and will be processed as if the cacher did not exist.
2272
2275
2273 Implementations could also choose to not emit objects - instead locally
2276 Implementations could also choose to not emit objects - instead locally
2274 buffering objects or their encoded representation. They could then emit
2277 buffering objects or their encoded representation. They could then emit
2275 a single "coalesced" object when ``onfinished()`` is called. In
2278 a single "coalesced" object when ``onfinished()`` is called. In
2276 this way, the implementation would function as a filtering layer of
2279 this way, the implementation would function as a filtering layer of
2277 sorts.
2280 sorts.
2278
2281
2279 When caching objects, typically the encoded form of the object will
2282 When caching objects, typically the encoded form of the object will
2280 be stored. Keep in mind that if the original object is forwarded to
2283 be stored. Keep in mind that if the original object is forwarded to
2281 the output layer, it will need to be encoded there as well. For large
2284 the output layer, it will need to be encoded there as well. For large
2282 output, this redundant encoding could add overhead. Implementations
2285 output, this redundant encoding could add overhead. Implementations
2283 could wrap the encoded object data in ``wireprototypes.encodedresponse``
2286 could wrap the encoded object data in ``wireprototypes.encodedresponse``
2284 instances to avoid this overhead.
2287 instances to avoid this overhead.
2285 """
2288 """
2286
2289
2287 def __enter__(self):
2290 def __enter__(self):
2288 """Marks the instance as active.
2291 """Marks the instance as active.
2289
2292
2290 Should return self.
2293 Should return self.
2291 """
2294 """
2292
2295
2293 def __exit__(self, exctype, excvalue, exctb):
2296 def __exit__(self, exctype, excvalue, exctb):
2294 """Called when cacher is no longer used.
2297 """Called when cacher is no longer used.
2295
2298
2296 This can be used by implementations to perform cleanup actions (e.g.
2299 This can be used by implementations to perform cleanup actions (e.g.
2297 disconnecting network sockets, aborting a partially cached response).
2300 disconnecting network sockets, aborting a partially cached response).
2298 """
2301 """
2299
2302
2300 def adjustcachekeystate(self, state):
2303 def adjustcachekeystate(self, state):
2301 """Influences cache key derivation by adjusting state to derive key.
2304 """Influences cache key derivation by adjusting state to derive key.
2302
2305
2303 A dict defining the state used to derive the cache key is passed.
2306 A dict defining the state used to derive the cache key is passed.
2304
2307
2305 Implementations can modify this dict to record additional state that
2308 Implementations can modify this dict to record additional state that
2306 is wanted to influence key derivation.
2309 is wanted to influence key derivation.
2307
2310
2308 Implementations are *highly* encouraged to not modify or delete
2311 Implementations are *highly* encouraged to not modify or delete
2309 existing keys.
2312 existing keys.
2310 """
2313 """
2311
2314
2312 def setcachekey(self, key):
2315 def setcachekey(self, key):
2313 """Record the derived cache key for this request.
2316 """Record the derived cache key for this request.
2314
2317
2315 Instances may mutate the key for internal usage, as desired. e.g.
2318 Instances may mutate the key for internal usage, as desired. e.g.
2316 instances may wish to prepend the repo name, introduce path
2319 instances may wish to prepend the repo name, introduce path
2317 components for filesystem or URL addressing, etc. Behavior is up to
2320 components for filesystem or URL addressing, etc. Behavior is up to
2318 the cache.
2321 the cache.
2319
2322
2320 Returns a bool indicating if the request is cacheable by this
2323 Returns a bool indicating if the request is cacheable by this
2321 instance.
2324 instance.
2322 """
2325 """
2323
2326
2324 def lookup(self):
2327 def lookup(self):
2325 """Attempt to resolve an entry in the cache.
2328 """Attempt to resolve an entry in the cache.
2326
2329
2327 The instance is instructed to look for the cache key that it was
2330 The instance is instructed to look for the cache key that it was
2328 informed about via the call to ``setcachekey()``.
2331 informed about via the call to ``setcachekey()``.
2329
2332
2330 If there's no cache hit or the cacher doesn't wish to use the cached
2333 If there's no cache hit or the cacher doesn't wish to use the cached
2331 entry, ``None`` should be returned.
2334 entry, ``None`` should be returned.
2332
2335
2333 Else, a dict defining the cached result should be returned. The
2336 Else, a dict defining the cached result should be returned. The
2334 dict may have the following keys:
2337 dict may have the following keys:
2335
2338
2336 objs
2339 objs
2337 An iterable of objects that should be sent to the client. That
2340 An iterable of objects that should be sent to the client. That
2338 iterable of objects is expected to be what the command function
2341 iterable of objects is expected to be what the command function
2339 would return if invoked or an equivalent representation thereof.
2342 would return if invoked or an equivalent representation thereof.
2340 """
2343 """
2341
2344
2342 def onobject(self, obj):
2345 def onobject(self, obj):
2343 """Called when a new object is emitted from the command function.
2346 """Called when a new object is emitted from the command function.
2344
2347
2345 Receives as its argument the object that was emitted from the
2348 Receives as its argument the object that was emitted from the
2346 command function.
2349 command function.
2347
2350
2348 This method returns an iterator of objects to forward to the output
2351 This method returns an iterator of objects to forward to the output
2349 layer. The easiest implementation is a generator that just
2352 layer. The easiest implementation is a generator that just
2350 ``yield obj``.
2353 ``yield obj``.
2351 """
2354 """
2352
2355
2353 def onfinished(self):
2356 def onfinished(self):
2354 """Called after all objects have been emitted from the command function.
2357 """Called after all objects have been emitted from the command function.
2355
2358
2356 Implementations should return an iterator of objects to forward to
2359 Implementations should return an iterator of objects to forward to
2357 the output layer.
2360 the output layer.
2358
2361
2359 This method can be a generator.
2362 This method can be a generator.
2360 """
2363 """
@@ -1,4043 +1,4043
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 # coding: utf-8
2 # coding: utf-8
3 #
3 #
4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import annotations
9 from __future__ import annotations
10
10
11 import functools
11 import functools
12 import os
12 import os
13 import random
13 import random
14 import re
14 import re
15 import sys
15 import sys
16 import time
16 import time
17 import typing
17 import typing
18 import weakref
18 import weakref
19
19
20 from concurrent import futures
20 from concurrent import futures
21 from typing import (
21 from typing import (
22 Optional,
22 Optional,
23 )
23 )
24
24
25 from .i18n import _
25 from .i18n import _
26 from .node import (
26 from .node import (
27 bin,
27 bin,
28 hex,
28 hex,
29 nullrev,
29 nullrev,
30 sha1nodeconstants,
30 sha1nodeconstants,
31 short,
31 short,
32 )
32 )
33 from . import (
33 from . import (
34 bookmarks,
34 bookmarks,
35 branchmap,
35 branchmap,
36 bundle2,
36 bundle2,
37 bundlecaches,
37 bundlecaches,
38 changegroup,
38 changegroup,
39 color,
39 color,
40 commit,
40 commit,
41 context,
41 context,
42 dirstate,
42 dirstate,
43 discovery,
43 discovery,
44 encoding,
44 encoding,
45 error,
45 error,
46 exchange,
46 exchange,
47 extensions,
47 extensions,
48 filelog,
48 filelog,
49 hook,
49 hook,
50 lock as lockmod,
50 lock as lockmod,
51 match as matchmod,
51 match as matchmod,
52 mergestate as mergestatemod,
52 mergestate as mergestatemod,
53 mergeutil,
53 mergeutil,
54 namespaces,
54 namespaces,
55 narrowspec,
55 narrowspec,
56 obsolete,
56 obsolete,
57 pathutil,
57 pathutil,
58 phases,
58 phases,
59 policy,
59 policy,
60 pushkey,
60 pushkey,
61 pycompat,
61 pycompat,
62 repoview,
62 repoview,
63 requirements as requirementsmod,
63 requirements as requirementsmod,
64 revlog,
64 revlog,
65 revset,
65 revset,
66 revsetlang,
66 revsetlang,
67 scmutil,
67 scmutil,
68 sparse,
68 sparse,
69 store as storemod,
69 store as storemod,
70 subrepoutil,
70 subrepoutil,
71 tags as tagsmod,
71 tags as tagsmod,
72 transaction,
72 transaction,
73 txnutil,
73 txnutil,
74 util,
74 util,
75 vfs as vfsmod,
75 vfs as vfsmod,
76 wireprototypes,
76 wireprototypes,
77 )
77 )
78
78
79 from .branching import (
79 from .branching import (
80 rev_cache as rev_branch_cache,
80 rev_cache as rev_branch_cache,
81 )
81 )
82 from .configuration import rcutil
82 from .configuration import rcutil
83 from .interfaces import (
83 from .interfaces import (
84 repository,
84 repository,
85 )
85 )
86
86
87 from .utils import (
87 from .utils import (
88 hashutil,
88 hashutil,
89 procutil,
89 procutil,
90 stringutil,
90 stringutil,
91 urlutil,
91 urlutil,
92 )
92 )
93
93
94 from .revlogutils import (
94 from .revlogutils import (
95 concurrency_checker as revlogchecker,
95 concurrency_checker as revlogchecker,
96 constants as revlogconst,
96 constants as revlogconst,
97 sidedata as sidedatamod,
97 sidedata as sidedatamod,
98 )
98 )
99
99
100 release = lockmod.release
100 release = lockmod.release
101 urlerr = util.urlerr
101 urlerr = util.urlerr
102 urlreq = util.urlreq
102 urlreq = util.urlreq
103
103
104 RE_SKIP_DIRSTATE_ROLLBACK = re.compile(
104 RE_SKIP_DIRSTATE_ROLLBACK = re.compile(
105 b"^((dirstate|narrowspec.dirstate).*|branch$)"
105 b"^((dirstate|narrowspec.dirstate).*|branch$)"
106 )
106 )
107
107
108 # set of (path, vfs-location) tuples. vfs-location is:
108 # set of (path, vfs-location) tuples. vfs-location is:
109 # - 'plain' for vfs relative paths
109 # - 'plain' for vfs relative paths
110 # - '' for svfs relative paths
110 # - '' for svfs relative paths
111 _cachedfiles = set()
111 _cachedfiles = set()
112
112
113
113
114 class _basefilecache(scmutil.filecache):
114 class _basefilecache(scmutil.filecache):
115 """All filecache usage on repo are done for logic that should be unfiltered"""
115 """All filecache usage on repo are done for logic that should be unfiltered"""
116
116
117 def __get__(self, repo, type=None):
117 def __get__(self, repo, type=None):
118 if repo is None:
118 if repo is None:
119 return self
119 return self
120 # proxy to unfiltered __dict__ since filtered repo has no entry
120 # proxy to unfiltered __dict__ since filtered repo has no entry
121 unfi = repo.unfiltered()
121 unfi = repo.unfiltered()
122 try:
122 try:
123 return unfi.__dict__[self.sname]
123 return unfi.__dict__[self.sname]
124 except KeyError:
124 except KeyError:
125 pass
125 pass
126 return super(_basefilecache, self).__get__(unfi, type)
126 return super(_basefilecache, self).__get__(unfi, type)
127
127
128 def set(self, repo, value):
128 def set(self, repo, value):
129 return super(_basefilecache, self).set(repo.unfiltered(), value)
129 return super(_basefilecache, self).set(repo.unfiltered(), value)
130
130
131
131
132 class repofilecache(_basefilecache):
132 class repofilecache(_basefilecache):
133 """filecache for files in .hg but outside of .hg/store"""
133 """filecache for files in .hg but outside of .hg/store"""
134
134
135 def __init__(self, *paths):
135 def __init__(self, *paths):
136 super(repofilecache, self).__init__(*paths)
136 super(repofilecache, self).__init__(*paths)
137 for path in paths:
137 for path in paths:
138 _cachedfiles.add((path, b'plain'))
138 _cachedfiles.add((path, b'plain'))
139
139
140 def join(self, obj, fname):
140 def join(self, obj, fname):
141 return obj.vfs.join(fname)
141 return obj.vfs.join(fname)
142
142
143
143
144 class storecache(_basefilecache):
144 class storecache(_basefilecache):
145 """filecache for files in the store"""
145 """filecache for files in the store"""
146
146
147 def __init__(self, *paths):
147 def __init__(self, *paths):
148 super(storecache, self).__init__(*paths)
148 super(storecache, self).__init__(*paths)
149 for path in paths:
149 for path in paths:
150 _cachedfiles.add((path, b''))
150 _cachedfiles.add((path, b''))
151
151
152 def join(self, obj, fname):
152 def join(self, obj, fname):
153 return obj.sjoin(fname)
153 return obj.sjoin(fname)
154
154
155
155
156 class changelogcache(storecache):
156 class changelogcache(storecache):
157 """filecache for the changelog"""
157 """filecache for the changelog"""
158
158
159 def __init__(self):
159 def __init__(self):
160 super(changelogcache, self).__init__()
160 super(changelogcache, self).__init__()
161 _cachedfiles.add((b'00changelog.i', b''))
161 _cachedfiles.add((b'00changelog.i', b''))
162 _cachedfiles.add((b'00changelog.n', b''))
162 _cachedfiles.add((b'00changelog.n', b''))
163
163
164 def tracked_paths(self, obj):
164 def tracked_paths(self, obj):
165 paths = [self.join(obj, b'00changelog.i')]
165 paths = [self.join(obj, b'00changelog.i')]
166 if obj.store.opener.options.get(b'persistent-nodemap', False):
166 if obj.store.opener.options.get(b'persistent-nodemap', False):
167 paths.append(self.join(obj, b'00changelog.n'))
167 paths.append(self.join(obj, b'00changelog.n'))
168 return paths
168 return paths
169
169
170
170
171 class manifestlogcache(storecache):
171 class manifestlogcache(storecache):
172 """filecache for the manifestlog"""
172 """filecache for the manifestlog"""
173
173
174 def __init__(self):
174 def __init__(self):
175 super(manifestlogcache, self).__init__()
175 super(manifestlogcache, self).__init__()
176 _cachedfiles.add((b'00manifest.i', b''))
176 _cachedfiles.add((b'00manifest.i', b''))
177 _cachedfiles.add((b'00manifest.n', b''))
177 _cachedfiles.add((b'00manifest.n', b''))
178
178
179 def tracked_paths(self, obj):
179 def tracked_paths(self, obj):
180 paths = [self.join(obj, b'00manifest.i')]
180 paths = [self.join(obj, b'00manifest.i')]
181 if obj.store.opener.options.get(b'persistent-nodemap', False):
181 if obj.store.opener.options.get(b'persistent-nodemap', False):
182 paths.append(self.join(obj, b'00manifest.n'))
182 paths.append(self.join(obj, b'00manifest.n'))
183 return paths
183 return paths
184
184
185
185
186 class mixedrepostorecache(_basefilecache):
186 class mixedrepostorecache(_basefilecache):
187 """filecache for a mix files in .hg/store and outside"""
187 """filecache for a mix files in .hg/store and outside"""
188
188
189 def __init__(self, *pathsandlocations):
189 def __init__(self, *pathsandlocations):
190 # scmutil.filecache only uses the path for passing back into our
190 # scmutil.filecache only uses the path for passing back into our
191 # join(), so we can safely pass a list of paths and locations
191 # join(), so we can safely pass a list of paths and locations
192 super(mixedrepostorecache, self).__init__(*pathsandlocations)
192 super(mixedrepostorecache, self).__init__(*pathsandlocations)
193 _cachedfiles.update(pathsandlocations)
193 _cachedfiles.update(pathsandlocations)
194
194
195 def join(self, obj, fnameandlocation):
195 def join(self, obj, fnameandlocation):
196 fname, location = fnameandlocation
196 fname, location = fnameandlocation
197 if location == b'plain':
197 if location == b'plain':
198 return obj.vfs.join(fname)
198 return obj.vfs.join(fname)
199 else:
199 else:
200 if location != b'':
200 if location != b'':
201 raise error.ProgrammingError(
201 raise error.ProgrammingError(
202 b'unexpected location: %s' % location
202 b'unexpected location: %s' % location
203 )
203 )
204 return obj.sjoin(fname)
204 return obj.sjoin(fname)
205
205
206
206
207 def isfilecached(repo, name):
207 def isfilecached(repo, name):
208 """check if a repo has already cached "name" filecache-ed property
208 """check if a repo has already cached "name" filecache-ed property
209
209
210 This returns (cachedobj-or-None, iscached) tuple.
210 This returns (cachedobj-or-None, iscached) tuple.
211 """
211 """
212 cacheentry = repo.unfiltered()._filecache.get(name, None)
212 cacheentry = repo.unfiltered()._filecache.get(name, None)
213 if not cacheentry:
213 if not cacheentry:
214 return None, False
214 return None, False
215 return cacheentry.obj, True
215 return cacheentry.obj, True
216
216
217
217
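A sketch of the intended usage pattern for ``isfilecached()``: consult a cached property only if it is already loaded, without triggering the (possibly expensive) load. The str key is an assumption matching the property name:

def changelog_if_loaded(repo):
    # The name is the filecache property name as keyed in repo._filecache.
    cl, cached = isfilecached(repo, 'changelog')
    return cl if cached else None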
218 class unfilteredpropertycache(util.propertycache):
218 class unfilteredpropertycache(util.propertycache):
219 """propertycache that apply to unfiltered repo only"""
219 """propertycache that apply to unfiltered repo only"""
220
220
221 def __get__(self, repo, type=None):
221 def __get__(self, repo, type=None):
222 unfi = repo.unfiltered()
222 unfi = repo.unfiltered()
223 if unfi is repo:
223 if unfi is repo:
224 return super(unfilteredpropertycache, self).__get__(unfi)
224 return super(unfilteredpropertycache, self).__get__(unfi)
225 return getattr(unfi, self.name)
225 return getattr(unfi, self.name)
226
226
227
227
228 class filteredpropertycache(util.propertycache):
228 class filteredpropertycache(util.propertycache):
229 """propertycache that must take filtering in account"""
229 """propertycache that must take filtering in account"""
230
230
231 def cachevalue(self, obj, value):
231 def cachevalue(self, obj, value):
232 object.__setattr__(obj, self.name, value)
232 object.__setattr__(obj, self.name, value)
233
233
234
234
235 def hasunfilteredcache(repo, name):
235 def hasunfilteredcache(repo, name):
236 """check if a repo has an unfilteredpropertycache value for <name>"""
236 """check if a repo has an unfilteredpropertycache value for <name>"""
237 return name in vars(repo.unfiltered())
237 return name in vars(repo.unfiltered())
238
238
239
239
240 def unfilteredmethod(orig):
240 def unfilteredmethod(orig):
241 """decorate method that always need to be run on unfiltered version"""
241 """decorate method that always need to be run on unfiltered version"""
242
242
243 @functools.wraps(orig)
243 @functools.wraps(orig)
244 def wrapper(repo, *args, **kwargs):
244 def wrapper(repo, *args, **kwargs):
245 return orig(repo.unfiltered(), *args, **kwargs)
245 return orig(repo.unfiltered(), *args, **kwargs)
246
246
247 return wrapper
247 return wrapper
248
248
249
249
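A self-contained sketch of applying the decorator; ``_demo`` is hypothetical and only mimics the one attribute the wrapper relies on:

class _demo:
    def unfiltered(self):
        return self  # a real repoview would return the raw repo here

    @unfilteredmethod
    def destroyed(self):
        # runs with ``self`` already swapped for self.unfiltered()
        return self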
250 moderncaps = {
250 moderncaps = {
251 b'lookup',
251 b'lookup',
252 b'branchmap',
252 b'branchmap',
253 b'pushkey',
253 b'pushkey',
254 b'known',
254 b'known',
255 b'getbundle',
255 b'getbundle',
256 b'unbundle',
256 b'unbundle',
257 }
257 }
258 legacycaps = moderncaps.union({b'changegroupsubset'})
258 legacycaps = moderncaps.union({b'changegroupsubset'})
259
259
260
260
261 class localcommandexecutor: # (repository.ipeercommandexecutor)
261 class localcommandexecutor(repository.ipeercommandexecutor):
262 def __init__(self, peer):
262 def __init__(self, peer):
263 self._peer = peer
263 self._peer = peer
264 self._sent = False
264 self._sent = False
265 self._closed = False
265 self._closed = False
266
266
267 def __enter__(self):
267 def __enter__(self):
268 return self
268 return self
269
269
270 def __exit__(self, exctype, excvalue, exctb):
270 def __exit__(self, exctype, excvalue, exctb):
271 self.close()
271 self.close()
272
272
273 def callcommand(self, command, args):
273 def callcommand(self, command, args):
274 if self._sent:
274 if self._sent:
275 raise error.ProgrammingError(
275 raise error.ProgrammingError(
276 b'callcommand() cannot be used after sendcommands()'
276 b'callcommand() cannot be used after sendcommands()'
277 )
277 )
278
278
279 if self._closed:
279 if self._closed:
280 raise error.ProgrammingError(
280 raise error.ProgrammingError(
281 b'callcommand() cannot be used after close()'
281 b'callcommand() cannot be used after close()'
282 )
282 )
283
283
284 # We don't need to support anything fancy. Just call the named
284 # We don't need to support anything fancy. Just call the named
285 # method on the peer and return a resolved future.
285 # method on the peer and return a resolved future.
286 fn = getattr(self._peer, pycompat.sysstr(command))
286 fn = getattr(self._peer, pycompat.sysstr(command))
287
287
288 f = futures.Future()
288 f = futures.Future()
289
289
290 try:
290 try:
291 result = fn(**pycompat.strkwargs(args))
291 result = fn(**pycompat.strkwargs(args))
292 except Exception:
292 except Exception:
293 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
293 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
294 else:
294 else:
295 f.set_result(result)
295 f.set_result(result)
296
296
297 return f
297 return f
298
298
299 def sendcommands(self):
299 def sendcommands(self):
300 self._sent = True
300 self._sent = True
301
301
302 def close(self):
302 def close(self):
303 self._closed = True
303 self._closed = True
304
304
305
305
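A sketch of the calling convention this executor shares with remote peers (callers would normally obtain it via the peer rather than construct it directly): futures resolve synchronously here, but the callcommand/sendcommands dance is still required by the interface.

def peer_heads(peer):
    with localcommandexecutor(peer) as e:
        f = e.callcommand(b'heads', {})
        e.sendcommands()  # a no-op locally, but part of the interface
    return f.result()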
306 class localpeer(repository.peer): # (repository.ipeercommands)
306 class localpeer(repository.peer): # (repository.ipeercommands)
307 '''peer for a local repo; reflects only the most recent API'''
307 '''peer for a local repo; reflects only the most recent API'''
308
308
309 def __init__(self, repo, caps=None, path=None, remotehidden=False):
309 def __init__(self, repo, caps=None, path=None, remotehidden=False):
310 super(localpeer, self).__init__(
310 super(localpeer, self).__init__(
311 repo.ui, path=path, remotehidden=remotehidden
            repo.ui, path=path, remotehidden=remotehidden
        )

        if caps is None:
            caps = moderncaps.copy()
        if remotehidden:
            self._repo = repo.filtered(b'served.hidden')
        else:
            self._repo = repo.filtered(b'served')
        if repo._wanted_sidedata:
            formatted = bundle2.format_remote_wanted_sidedata(repo)
            caps.add(b'exp-wanted-sidedata=' + formatted)

        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def get_cached_bundle_inline(self, path):
        # not needed with local peer
        raise NotImplementedError

    def clonebundles(self):
        return bundlecaches.get_manifest(self._repo)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def getbundle(
        self,
        source,
        heads=None,
        common=None,
        bundlecaps=None,
        remote_sidedata=None,
        **kwargs,
    ):
        chunks = exchange.getbundlechunks(
            self._repo,
            source,
            heads=heads,
            common=common,
            bundlecaps=bundlecaps,
            remote_sidedata=remote_sidedata,
            **kwargs,
        )[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make
            # the wire level function happier. We need to build a proper
            # object from it in the local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler(b'01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_(b'cannot perform stream clone against local peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                if hasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant, but it allows a "simple" solution
                # for issue4594.
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(
                _(b'push failed:'), stringutil.forcebytestr(exc)
            )

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.
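    # A minimal usage sketch of the executor API (`peer` stands for any
    # peer instance, such as this localpeer):
    #
    #     with peer.commandexecutor() as e:
    #         f = e.callcommand(b'heads', {})
    #     heads = f.result()
    #
    # callcommand() returns a future whose result() is the command's return
    # value, per the ipeercommandexecutor Protocol.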


class locallegacypeer(localpeer):  # (repository.ipeerlegacycommands)
    """peer extension which implements legacy methods too; used for tests with
    restricted capabilities"""

    def __init__(self, repo, path=None, remotehidden=False):
        super(locallegacypeer, self).__init__(
            repo, caps=legacycaps, path=path, remotehidden=remotehidden
        )

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=bases, ancestorsof=heads
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    # End of baselegacywirecommands interface.


# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()
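# A sketch of how an extension typically hooks in here (the requirement
# string is a made-up placeholder):
#
#     def featuresetup(ui, supported):
#         # accept repositories carrying this extension's requirement
#         supported |= {b'exp-myext-storage'}
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)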


def _getsharedvfs(hgvfs, requirements):
    """returns the vfs object pointing to the root of the shared source
    repo for a shared repository

    hgvfs is a vfs pointing at .hg/ of the current repo (the shared one)
    requirements is a set of requirements of the current repo (the shared one)
    """
    # The ``shared`` or ``relshared`` requirements indicate the
    # store lives in the path contained in the ``.hg/sharedpath`` file.
    # This is an absolute path for ``shared`` and relative to
    # ``.hg/`` for ``relshared``.
    sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
    if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
        sharedpath = util.normpath(hgvfs.join(sharedpath))

    sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

    if not sharedvfs.exists():
        raise error.RepoError(
            _(b'.hg/sharedpath points to nonexistent directory %s')
            % sharedvfs.base
        )
    return sharedvfs
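
# Worked example (paths are illustrative): for a share created with
# ``hg share --relative ../src work``, work/.hg/sharedpath holds a relative
# path such as ``../../src/.hg`` and the ``relshared`` requirement is set,
# so the path is joined to work/.hg/ and normalized above. For a plain
# ``hg share``, sharedpath holds an absolute path and is used as-is.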


def makelocalrepository(baseui, path: bytes, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading steps (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository
    type creation. Note that an extension's wrapped function may be called
    even if that extension is not loaded for the repo being constructed.
    Extensions should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not (see the sketch following this function).
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
    # Whether this repository is a shared one or not
    shared = False
    # If this repository is shared, vfs pointing to the shared repo
    sharedvfs = None

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except FileNotFoundError:
            pass
        except ValueError as e:
            # Can be raised on Python 3.8 when path is invalid.
            raise error.Abort(
                _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
            )

        raise error.RepoError(_(b'repository %s not found') % path)

    requirements = scmutil.readrequires(hgvfs, True)
    shared = (
        requirementsmod.SHARED_REQUIREMENT in requirements
        or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
    )
    storevfs = None
    if shared:
        # This is a shared repo
        sharedvfs = _getsharedvfs(hgvfs, requirements)
        storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
    else:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'))

    # If .hg/requires contains the sharesafe requirement, it means
    # there exists a `.hg/store/requires` too and we should read it.
    # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store
    # requirement is present. We never write SHARESAFE_REQUIREMENT for a
    # repo if store is not present; refer to checkrequirementscompat() for
    # that.
    #
    # However, if SHARESAFE_REQUIREMENT is not present, it means that the
    # repository was shared the old way. We check the share source .hg/requires
    # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
    # to be reshared.
    hint = _(b"see `hg help config.format.use-share-safe` for more information")
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
        if (
            shared
            and requirementsmod.SHARESAFE_REQUIREMENT
            not in scmutil.readrequires(sharedvfs, True)
        ):
            mismatch_warn = ui.configbool(
                b'share', b'safe-mismatch.source-not-safe.warn'
            )
            mismatch_config = ui.config(
                b'share', b'safe-mismatch.source-not-safe'
            )
            mismatch_verbose_upgrade = ui.configbool(
                b'share', b'safe-mismatch.source-not-safe:verbose-upgrade'
            )
            if mismatch_config in (
                b'downgrade-allow',
                b'allow',
                b'downgrade-abort',
            ):
                # prevent cyclic import localrepo -> upgrade -> localrepo
                from . import upgrade

                upgrade.downgrade_share_to_non_safe(
                    ui,
                    hgvfs,
                    sharedvfs,
                    requirements,
                    mismatch_config,
                    mismatch_warn,
                    mismatch_verbose_upgrade,
                )
            elif mismatch_config == b'abort':
                raise error.Abort(
                    _(b"share source does not support share-safe requirement"),
                    hint=hint,
                )
            else:
                raise error.Abort(
                    _(
                        b"share-safe mismatch with source.\nUnrecognized"
                        b" value '%s' of `share.safe-mismatch.source-not-safe`"
                        b" set."
                    )
                    % mismatch_config,
                    hint=hint,
                )
        else:
            requirements |= scmutil.readrequires(storevfs, False)
    elif shared:
        sourcerequires = scmutil.readrequires(sharedvfs, False)
        if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
            mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
            mismatch_warn = ui.configbool(
                b'share', b'safe-mismatch.source-safe.warn'
            )
            mismatch_verbose_upgrade = ui.configbool(
                b'share', b'safe-mismatch.source-safe:verbose-upgrade'
            )
            if mismatch_config in (
                b'upgrade-allow',
                b'allow',
                b'upgrade-abort',
            ):
                # prevent cyclic import localrepo -> upgrade -> localrepo
                from . import upgrade

                upgrade.upgrade_share_to_safe(
                    ui,
                    hgvfs,
                    storevfs,
                    requirements,
                    mismatch_config,
                    mismatch_warn,
                    mismatch_verbose_upgrade,
                )
            elif mismatch_config == b'abort':
                raise error.Abort(
                    _(
                        b'version mismatch: source uses share-safe'
                        b' functionality while the current share does not'
                    ),
                    hint=hint,
                )
            else:
                raise error.Abort(
                    _(
                        b"share-safe mismatch with source.\nUnrecognized"
                        b" value '%s' of `share.safe-mismatch.source-safe` set."
                    )
                    % mismatch_config,
                    hint=hint,
                )

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. If the `shared` or
    # `relshared` requirements are present, the current repository is a
    # share and the store exists in the path mentioned in `.hg/sharedpath`.
    if shared:
        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
        features.add(repository.REPO_FEATURE_SHARED_STORAGE)
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(
        requirements,
        storebasepath,
        lambda base: vfsmod.vfs(base, cacheaudited=True),
    )
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    if (
        requirementsmod.REVLOGV2_REQUIREMENT in requirements
        or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
    ):
        features.add(repository.REPO_FEATURE_SIDE_DATA)
        # the revlogv2 docket introduced a race condition that we need to fix
        features.discard(repository.REPO_FEATURE_STREAM_CLONE)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The wcache vfs is used to manage cache files related to the working copy
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(
            ui=ui,
            intents=intents,
            requirements=requirements,
            features=features,
            wdirvfs=wdirvfs,
            hgvfs=hgvfs,
            store=store,
            storevfs=storevfs,
            storeoptions=storevfs.options,
            cachevfs=cachevfs,
            wcachevfs=wcachevfs,
            extensionmodulenames=extensionmodulenames,
            extrastate=extrastate,
            baseclasses=bases,
        )

        if not isinstance(typ, type):
            raise error.ProgrammingError(
                b'unable to construct type for %s' % iface
            )

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(
        b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
    )

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents,
    )
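
# A sketch of the factory-function wrapping described in the docstring of
# makelocalrepository(). ``makefilestorage`` is one factory reachable via
# ``REPO_INTERFACES``; the wrapper and the derived class names below are
# placeholders:
#
#     def wrapped(orig, requirements, features, **kwargs):
#         cls = orig(requirements=requirements, features=features, **kwargs)
#         # No-op unless this extension is enabled for the repo in question.
#         if __name__ not in kwargs['extensionmodulenames']:
#             return cls
#
#         class extfilestorage(cls):
#             pass  # override file storage behavior here
#
#         return extfilestorage
#
#     extensions.wrapfunction(localrepo, 'makefilestorage', wrapped)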


def loadhgrc(
    ui,
    wdirvfs: vfsmod.vfs,
    hgvfs: vfsmod.vfs,
    requirements,
    sharedvfs: Optional[vfsmod.vfs] = None,
):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources (see the sketch following this
    function).

    sharedvfs is a vfs object pointing to the source repo if the current
    one is a shared one
    """
    if not rcutil.use_repo_hgrc():
        return False

    ret = False
    # first load config from the shared source if we have to
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
        try:
            ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
            ret = True
        except IOError:
            pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    return ret
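
# A sketch of the monkeypatching suggested above (the wrapper and the extra
# file name are placeholders):
#
#     def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements, *args, **kwargs):
#         ret = orig(ui, wdirvfs, hgvfs, requirements, *args, **kwargs)
#         try:
#             ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
#             ret = True
#         except IOError:
#             pass
#         return ret
#
#     extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)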


def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # requirement is present.
    autoextensions = {
        b'git': [b'git'],
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source=b'autoload')
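
# Worked example: for a repository whose .hg/requires contains b'lfs', the
# loop above acts as if the user had configured
#
#     [extensions]
#     lfs =
#
# (config source b'autoload'), unless an explicit [extensions] entry for
# lfs already exists.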


def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)

    return supported
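
# For instance, with a usable zstd engine the loop above adds
# b'exp-compression-zstd' alongside the modern spelling
# requirementsmod.REVLOG_COMPRESSION_ZSTD, so repositories written with
# either requirement remain openable.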


def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RequirementError`` if
    there exists any requirement in that set that currently loaded code
    doesn't recognize.

    Returns ``None`` on success.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(missing)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )
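
# Failure sketch (the requirement name is made up): opening a repository
# whose .hg/requires lists b'exp-frobnicate' with a Mercurial that does not
# know it aborts roughly like:
#
#     abort: repository requires features unknown to this Mercurial:
#     exp-frobnicate
#     (see https://mercurial-scm.org/wiki/MissingRequirement for more
#     information)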


def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if (
        requirementsmod.SPARSE_REQUIREMENT in requirements
        and not sparse.enabled
    ):
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extension to access'
            )
        )


def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if requirementsmod.STORE_REQUIREMENT in requirements:
        if requirementsmod.FNCACHE_REQUIREMENT in requirements:
            dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
            return storemod.fncachestore(path, vfstype, dotencode)

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)
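
# Dispatch sketch for the function above (requirement names abbreviated):
#
#     {'store', 'fncache', 'dotencode'} -> fncachestore(path, vfstype, True)
#     {'store', 'fncache'}              -> fncachestore(path, vfstype, False)
#     {'store'}                         -> encodedstore(path, vfstype)
#     (no 'store' requirement)          -> basicstore(path, vfstype)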


def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if (
        requirementsmod.REVLOGV1_REQUIREMENT in requirements
        or requirementsmod.REVLOGV2_REQUIREMENT in requirements
    ):
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:  # explicitly mark repo as using revlogv0
        options[b'revlogv0'] = True

    if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
        options[b'copies-storage'] = b'changeset-sidedata'
    else:
        writecopiesto = ui.config(b'experimental', b'copies.write-to')
        copiesextramode = (b'changeset-only', b'compatibility')
        if writecopiesto in copiesextramode:
            options[b'copies-storage'] = b'extra'

    return options


def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    feature_config = options[b'feature-config'] = revlog.FeatureConfig()
    data_config = options[b'data-config'] = revlog.DataConfig()
    delta_config = options[b'delta-config'] = revlog.DeltaConfig()

    if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
        options[b'revlogv1'] = True
    if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True
    if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
        options[b'changelogv2'] = True
        cmp_rank = ui.configbool(b'experimental', b'changelog-v2.compute-rank')
        options[b'changelogv2.compute-rank'] = cmp_rank

    if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        data_config.chunk_cache_size = chunkcachesize

    memory_profile = scmutil.get_resource_profile(ui, b'memory')
    if memory_profile >= scmutil.RESOURCE_MEDIUM:
        data_config.uncompressed_cache_count = 10_000
        data_config.uncompressed_cache_factor = 4
        if memory_profile >= scmutil.RESOURCE_HIGH:
            data_config.uncompressed_cache_factor = 10

    delta_config.delta_both_parents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    delta_config.candidate_group_chunk_size = ui.configint(
        b'storage',
        b'revlog.delta-parent-search.candidate-group-chunk-size',
    )
    delta_config.debug_delta = ui.configbool(b'debug', b'revlog.debug-delta')

    issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
    options[b'issue6528.fix-incoming'] = issue6528

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    delta_config.lazy_delta = lazydelta
    delta_config.lazy_delta_base = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        delta_config.max_deltachain_span = chainspan

    has_populate = util.has_mmap_populate()
    if ui.configbool(b'storage', b'revlog.mmap.index', has_populate):
        data_config.mmap_index_threshold = ui.configbytes(
            b'storage',
            b'revlog.mmap.index:size-threshold',
        )

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    data_config.with_sparse_read = withsparseread
    data_config.sr_density_threshold = srdensitythres
    data_config.sr_min_gap_size = srmingapsize

    sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
    delta_config.sparse_revlog = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True
        data_config.with_sparse_read = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        delta_config.max_chain_len = maxchainlen

1143 for r in requirements:
1143 for r in requirements:
1144 # we allow multiple compression engine requirement to co-exist because
1144 # we allow multiple compression engine requirement to co-exist because
1145 # strickly speaking, revlog seems to support mixed compression style.
1145 # strickly speaking, revlog seems to support mixed compression style.
1146 #
1146 #
1147 # The compression used for new entries will be "the last one"
1147 # The compression used for new entries will be "the last one"
1148 prefix = r.startswith
1148 prefix = r.startswith
1149 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1149 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1150 feature_config.compression_engine = r.split(b'-', 2)[2]
1150 feature_config.compression_engine = r.split(b'-', 2)[2]
1151
1151
1152 zlib_level = ui.configint(b'storage', b'revlog.zlib.level')
1152 zlib_level = ui.configint(b'storage', b'revlog.zlib.level')
1153 if zlib_level is not None:
1153 if zlib_level is not None:
1154 if not (0 <= zlib_level <= 9):
1154 if not (0 <= zlib_level <= 9):
1155 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1155 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1156 raise error.Abort(msg % zlib_level)
1156 raise error.Abort(msg % zlib_level)
1157 feature_config.compression_engine_options[b'zlib.level'] = zlib_level
1157 feature_config.compression_engine_options[b'zlib.level'] = zlib_level
1158 zstd_level = ui.configint(b'storage', b'revlog.zstd.level')
1158 zstd_level = ui.configint(b'storage', b'revlog.zstd.level')
1159 if zstd_level is not None:
1159 if zstd_level is not None:
1160 if not (0 <= zstd_level <= 22):
1160 if not (0 <= zstd_level <= 22):
1161 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1161 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1162 raise error.Abort(msg % zstd_level)
1162 raise error.Abort(msg % zstd_level)
1163 feature_config.compression_engine_options[b'zstd.level'] = zstd_level
1163 feature_config.compression_engine_options[b'zstd.level'] = zstd_level

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        feature_config.enable_ellipsis = True

    if ui.configbool(b'experimental', b'rust.index'):
        options[b'rust.index'] = True
    if requirementsmod.NODEMAP_REQUIREMENT in requirements:
        slow_path = ui.config(
            b'storage', b'revlog.persistent-nodemap.slow-path'
        )
        if slow_path not in (b'allow', b'warn', b'abort'):
            default = ui.config_default(
                b'storage', b'revlog.persistent-nodemap.slow-path'
            )
            msg = _(
                b'unknown value for config '
                b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
            )
            ui.warn(msg % slow_path)
            if not ui.quiet:
                ui.warn(_(b'falling back to default value: %s\n') % default)
            slow_path = default

        msg = _(
            b"accessing `persistent-nodemap` repository without associated "
            b"fast implementation."
        )
        hint = _(
            b"check `hg help config.format.use-persistent-nodemap` "
            b"for details"
        )
        if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
            if slow_path == b'warn':
                msg = b"warning: " + msg + b'\n'
                ui.warn(msg)
                if not ui.quiet:
                    hint = b'(' + hint + b')\n'
                    ui.warn(hint)
            if slow_path == b'abort':
                raise error.Abort(msg, hint=hint)
        options[b'persistent-nodemap'] = True
    if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
        slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
        if slow_path not in (b'allow', b'warn', b'abort'):
            default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
            msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
            ui.warn(msg % slow_path)
            if not ui.quiet:
                ui.warn(_(b'falling back to default value: %s\n') % default)
            slow_path = default

        msg = _(
            b"accessing `dirstate-v2` repository without associated "
            b"fast implementation."
        )
        hint = _(
            b"check `hg help config.format.use-dirstate-v2` for details"
        )
        if not dirstate.HAS_FAST_DIRSTATE_V2:
            if slow_path == b'warn':
                msg = b"warning: " + msg + b'\n'
                ui.warn(msg)
                if not ui.quiet:
                    hint = b'(' + hint + b')\n'
                    ui.warn(hint)
            if slow_path == b'abort':
                raise error.Abort(msg, hint=hint)
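
    # Illustrative note (not part of the original source): the two slow-path
    # checks above follow the same three-valued policy, configurable as e.g.:
    #
    #   [storage]
    #   revlog.persistent-nodemap.slow-path = warn
    #   dirstate-v2.slow-path = abort
    #
    # `allow` silently uses the slow pure-Python fallback, `warn` prints the
    # message built above, and `abort` refuses to open the repository.
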
    if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
        options[b'persistent-nodemap.mmap'] = True
    if ui.configbool(b'devel', b'persistent-nodemap'):
        options[b'devel-force-nodemap'] = True

    return options


def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


class revlogfilestorage(repository.ilocalrepositoryfilestorage):
    """File storage when using revlogs."""

    def file(self, path):
        if path.startswith(b'/'):
            path = path[1:]

        try_split = (
            self.currenttransaction() is not None
            or txnutil.mayhavepending(self.root)
        )

        return filelog.filelog(self.svfs, path, try_split=try_split)
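
    # Hypothetical usage sketch (not from the original source); callers reach
    # this through the composed repository type rather than instantiating the
    # storage class directly:
    #
    #   fl = repo.file(b'path/to/file.txt')  # a leading b'/' is stripped
    #   data = fl.read(fl.node(0))           # contents of the file's rev 0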


class revlognarrowfilestorage(repository.ilocalrepositoryfilestorage):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path.startswith(b'/'):
            path = path[1:]

        try_split = (
            self.currenttransaction() is not None
            or txnutil.mayhavepending(self.root)
        )
        return filelog.narrowfilelog(
            self.svfs, path, self._storenarrowmatch, try_split=try_split
        )


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage


# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]
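
# A minimal sketch (an assumption about the builder, not the actual
# implementation) of how ``makelocalrepository()`` can consume this list:
# each factory is resolved at call time and contributes one base class, and
# the final repository type derives from all of them:
#
#   bases = []
#   for _iface, factoryfn in REPO_INTERFACES:
#       bases.append(factoryfn()(requirements=requirements, features=features))
#   derived = type('derivedrepo', tuple(bases), {})
#
# Resolving ``factoryfn()`` late is what allows extensions to wrap the
# module-level ``makemain``/``makefilestorage`` and still take effect here.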

_localrepo_base_classes = object

if typing.TYPE_CHECKING:
    _localrepo_base_classes = [
        repository.ilocalrepositorymain,
        repository.ilocalrepositoryfilestorage,
    ]


class localrepository(_localrepo_base_classes):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    _basesupported = {
        requirementsmod.ARCHIVED_PHASE_REQUIREMENT,
        requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
        requirementsmod.CHANGELOGV2_REQUIREMENT,
        requirementsmod.COPIESSDC_REQUIREMENT,
        requirementsmod.DIRSTATE_TRACKED_HINT_V1,
        requirementsmod.DIRSTATE_V2_REQUIREMENT,
        requirementsmod.DOTENCODE_REQUIREMENT,
        requirementsmod.FNCACHE_REQUIREMENT,
        requirementsmod.GENERALDELTA_REQUIREMENT,
        requirementsmod.INTERNAL_PHASE_REQUIREMENT,
        requirementsmod.NODEMAP_REQUIREMENT,
        requirementsmod.RELATIVE_SHARED_REQUIREMENT,
        requirementsmod.REVLOGV1_REQUIREMENT,
        requirementsmod.REVLOGV2_REQUIREMENT,
        requirementsmod.SHARED_REQUIREMENT,
        requirementsmod.SHARESAFE_REQUIREMENT,
        requirementsmod.SPARSE_REQUIREMENT,
        requirementsmod.SPARSEREVLOG_REQUIREMENT,
        requirementsmod.STORE_REQUIREMENT,
        requirementsmod.TREEMANIFEST_REQUIREMENT,
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }

    def __init__(
        self,
        baseui,
        ui,
        origroot: bytes,
        wdirvfs: vfsmod.vfs,
        hgvfs: vfsmod.vfs,
        requirements,
        supportedrequirements,
        sharedpath: bytes,
        store,
        cachevfs: vfsmod.vfs,
        wcachevfs: vfsmod.vfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` path defining the storage base directory. Points to a
           ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.nodeconstants = sha1nodeconstants
        self.nullid = self.nodeconstants.nullid
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if hasattr(self.svfs, 'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        self._dirstate = None
        # post-dirstate-status hooks
        self._postdsstatus = []

        self._pending_narrow_pats = None
        self._pending_narrow_pats_dirstate = None

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'

        self._wanted_sidedata = set()
        self._sidedata_computers = {}
        sidedatamod.set_sidedata_spec_for_repo(self)

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not hasattr(repo, '_wlockref')
                or not hasattr(repo, '_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            # path prefixes covered by 'lock'
            vfs_path_prefixes = (
                b'journal.',
                b'undo.',
                b'strip-backup/',
                b'cache/',
            )
            if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not hasattr(repo, '_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs

    @property
    def vfs_map(self):
        return {
            b'': self.svfs,
            b'plain': self.vfs,
            b'store': self.svfs,
        }

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        if self.ui.configbool(b'experimental', b'narrow'):
            caps.add(wireprototypes.NARROWCAP)
        return caps

    # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False

    def peer(self, path=None, remotehidden=False):
        return localpeer(
            self, path=path, remotehidden=remotehidden
        )  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)
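
    # Hedged usage sketch (not from the original source): because filtering is
    # not recursive, re-filtering an already filtered view is harmless:
    #
    #   served = repo.filtered(b'served')
    #   assert served.filtered(b'served').filtername == served.filtername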

    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) outside transaction update changelog to content B
        # 3) outside transaction update bookmark file referring to content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are dropped.
        #
        # Having this happen during a read is not great, but it becomes worse
        # when it happens during a write, because the bookmarks to the "unknown"
        # nodes will be dropped for good. However, writes happen within locks.
        # This locking makes it possible to have a race-free consistent read.
        # For this purpose, data read from disk before locking is
        # "invalidated" right after the locks are taken. These invalidations
        # are "light": the `filecache` mechanism keeps the data in memory and
        # will reuse it if the underlying files did not change. Not parsing the
        # same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by the
        # bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would be
        # refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not
        # enough, because at the time we build the content for `_bookmarks` in
        # (4), the changelog file has already diverged from the content used
        # for loading `changelog` in (1).
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still happen
        # without the lock (with a narrower window), but it would no longer go
        # undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) we force the `changelog` filecache to be tested
        # 4) cachestat for `changelog` is captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as the
        # cache stat computed in (1). As a result, at locking time:
        # * if the changelog did not change since (1) -> we can reuse the data
        # * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)

    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if 'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. what we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @changelogcache()
    def changelog(repo):
        # load dirstate before changelog to avoid race see issue6303
        repo.dirstate.prefetch_parents()
        return repo.store.changelog(
            txnutil.mayhavepending(repo.root),
            concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
        )

    @manifestlogcache()
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @unfilteredpropertycache
    def dirstate(self):
        if self._dirstate is None:
            self._dirstate = self._makedirstate()
        else:
            self._dirstate.refresh()
        return self._dirstate

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = None
        if sparse.use_sparse(self):
            sparsematchfn = lambda: sparse.matcher(self)
        v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
        th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
        use_dirstate_v2 = v2_req in self.requirements
        use_tracked_hint = th in self.requirements

        return dirstate.dirstate(
            self.vfs,
            self.ui,
            self.root,
            self._dirstatevalidate,
            sparsematchfn,
            self.nodeconstants,
            use_dirstate_v2,
            use_tracked_hint=use_tracked_hint,
        )

    def _dirstatevalidate(self, node):
        okay = True
        try:
            self.changelog.rev(node)
        except error.LookupError:
            # If the parents are unknown, it might just be because the
            # changelog in memory is lagging behind the dirstate in memory.
            # So try to refresh the changelog first.
            #
            # We only do so if we don't hold the lock; if we do hold the lock,
            # the invalidation at that time should have taken care of this and
            # something is very fishy.
            if self.currentlock() is None:
                self.invalidate()
                try:
                    self.changelog.rev(node)
                except error.LookupError:
                    okay = False
            else:
                # XXX we should consider raising an error here.
                okay = False
        if okay:
            return node
        else:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return self.nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        # the narrow management should probably move into its own object
        val = self._pending_narrow_pats
        if val is None:
            val = narrowspec.load(self)
        return val

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch
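
    # Illustrative note (not from the original source): in a repository
    # without the narrow requirement, ``self._narrowmatch`` is
    # ``matchmod.always()``, so ``repo.narrowmatch()`` matches every path and
    # ``repo.narrowmatch(m)`` behaves like ``m`` itself; only narrow clones
    # pay the cost of the intersection.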

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, self.nodeconstants.nullid),
            nullrev: (nullrev, self.nodeconstants.nullid),
            self.nullid: (nullrev, self.nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast-path access to the working copy parents
        # however, only do it for filters that ensure the wc is visible.
        quick = self._quick_access_changeid_null.copy()
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == self.nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                # skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != self.nullid:
            quick[b'.'] = quick[p1node]
        return quick

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains a list of symbols we can recognise right away without
        further processing.
        """
        if self.filtername in repoview.filter_has_wc:
            return self._quick_access_changeid_wc
        return self._quick_access_changeid_null

    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in range(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == self.nodeconstants.nodelen:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 2 * self.nodeconstants.nodelen:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)

2039 def __contains__(self, changeid):
2039 def __contains__(self, changeid):
2040 """True if the given changeid exists"""
2040 """True if the given changeid exists"""
2041 try:
2041 try:
2042 self[changeid]
2042 self[changeid]
2043 return True
2043 return True
2044 except error.RepoLookupError:
2044 except error.RepoLookupError:
2045 return False
2045 return False
2046
2046
2047 def __nonzero__(self):
2047 def __nonzero__(self):
2048 return True
2048 return True
2049
2049
2050 __bool__ = __nonzero__
2050 __bool__ = __nonzero__
2051
2051
2052 def __len__(self):
2052 def __len__(self):
2053 # no need to pay the cost of repoview.changelog
2053 # no need to pay the cost of repoview.changelog
2054 unfi = self.unfiltered()
2054 unfi = self.unfiltered()
2055 return len(unfi.changelog)
2055 return len(unfi.changelog)
2056
2056
2057 def __iter__(self):
2057 def __iter__(self):
2058 return iter(self.changelog)
2058 return iter(self.changelog)
2059
2059
2060 def revs(self, expr: bytes, *args):
2060 def revs(self, expr: bytes, *args):
2061 """Find revisions matching a revset.
2061 """Find revisions matching a revset.
2062
2062
2063 The revset is specified as a string ``expr`` that may contain
2063 The revset is specified as a string ``expr`` that may contain
2064 %-formatting to escape certain types. See ``revsetlang.formatspec``.
2064 %-formatting to escape certain types. See ``revsetlang.formatspec``.
2065
2065
2066 Revset aliases from the configuration are not expanded. To expand
2066 Revset aliases from the configuration are not expanded. To expand
2067 user aliases, consider calling ``scmutil.revrange()`` or
2067 user aliases, consider calling ``scmutil.revrange()`` or
2068 ``repo.anyrevs([expr], user=True)``.
2068 ``repo.anyrevs([expr], user=True)``.
2069
2069
2070 Returns a smartset.abstractsmartset, which is a list-like interface
2070 Returns a smartset.abstractsmartset, which is a list-like interface
2071 that contains integer revisions.
2071 that contains integer revisions.
2072 """
2072 """
2073 tree = revsetlang.spectree(expr, *args)
2073 tree = revsetlang.spectree(expr, *args)
2074 return revset.makematcher(tree)(self)
2074 return revset.makematcher(tree)(self)
2075
2075
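    # Usage sketch (illustrative, not part of the upstream code; assumes an
    # already-open ``repo`` instance): %-formatting escapes values into the
    # revset, and the returned smartset yields integer revisions.
    #
    #   revs = repo.revs(b'ancestors(%d) and not public()', 42)
    #   for rev in revs:
    #       print(rev)
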
    def set(self, expr: bytes, *args):
        """Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        """
        for r in self.revs(expr, *args):
            yield self[r]

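    # Usage sketch (illustrative, assumes an open ``repo``): unlike
    # ``revs()``, ``set()`` yields changectx objects directly.
    #
    #   for ctx in repo.set(b'branch(%s) and head()', b'default'):
    #       print(ctx.hex())
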
    def anyrevs(self, specs: bytes, user=False, localalias=None):
        """Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        """
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

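    # Usage sketch (illustrative; the ``recent`` alias below is hypothetical
    # and supplied through ``localalias``): ``anyrevs()`` matches the union
    # of several revsets.
    #
    #   revs = repo.anyrevs(
    #       [b'draft()', b'recent()'],
    #       user=True,
    #       localalias={b'recent': b'limit(sort(all(), -rev), 5)'},
    #   )
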
    def url(self) -> bytes:
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

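    # Usage sketch (illustrative; the hook name and keyword argument are
    # hypothetical): an extension firing a custom hook through this wrapper.
    # Keyword arguments are exposed to the hook as HG_* environment data.
    #
    #   repo.hook(b'myext-updated', throw=False, count=len(repo))
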
    @filteredpropertycache
    def _tagscache(self):
        """Returns a tagscache object that contains various tags related
        caches."""

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache:
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in tags.items():
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        """Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object."""

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for name, (node, hist) in alltags.items():
            if node != self.nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value for (name, value) in tagtypes.items()
        }
        return (tags, tagtypes)

    def tagtype(self, tagname):
        """
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        """

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().items():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.items():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.values():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

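    # Usage sketch (illustrative, assumes an open ``repo``): the caches
    # above serve both directions of the tag mapping.
    #
    #   node = repo.tags()[b'tip']   # tag name -> node
    #   names = repo.nodetags(node)  # node -> sorted list of tag names
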
    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        """returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number"""
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            unfi = self.unfiltered()
            self._revbranchcache = rev_branch_cache.revbranchcache(unfi)
        return self._revbranchcache

    def register_changeset(self, rev, changelogrevision):
        self.revbranchcache().setdata(rev, changelogrevision)

    def branchtip(self, branch, ignoremissing=False):
        """return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        """
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass

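    # Usage sketch (illustrative): with ``ignoremissing=True`` a missing
    # branch falls through to None instead of raising RepoLookupError.
    #
    #   assert repo.branchtip(b'no-such-branch', ignoremissing=True) is None
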
    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

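    # Usage sketch (illustrative, assumes a non-empty ``repo``): ``known()``
    # maps candidate nodes to booleans, treating filtered (e.g. hidden)
    # revisions as unknown.
    #
    #   present = repo.known([repo[b'tip'].node(), b'\xff' * 20])
    #   # -> [True, False]
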
    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f: bytes, *insidef: bytes) -> bytes:
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=None):
        if p2 is None:
            p2 = self.nullid
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path: bytes, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self) -> bytes:
        return self.dirstate.getcwd()

    def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes:
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.items():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

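    # Configuration sketch (illustrative; the pattern and command below are
    # hypothetical): filters come from hgrc sections named after the filter.
    #
    #   [encode]
    #   **.txt = dos2unix
    #
    # ``_loadfilter(b'encode')`` would then return one (matcher, fn, params)
    # tuple whose default ``fn`` pipes data through ``procutil.filter``.
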
    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename: bytes) -> bytes:
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(
        self,
        filename: bytes,
        data: bytes,
        flags: bytes,
        backgroundclose=False,
        **kwargs,
    ) -> int:
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

    def wwritedata(self, filename: bytes, data: bytes) -> bytes:
        return self._filter(self._decodefilterpats, filename, data)

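    # Usage sketch (illustrative; assumes the wlock is held and ``foo.txt``
    # exists): round-tripping working directory data through the filters.
    #
    #   raw = repo.wread(b'foo.txt')           # applies [encode] filters
    #   n = repo.wwrite(b'foo.txt', raw, b'')  # applies [decode] filters
    #   assert n == len(repo.wwritedata(b'foo.txt', raw))
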
    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        # At that point your dirstate should be clean:
        #
        # - If you don't have the wlock, why would you still have a dirty
        #   dirstate?
        #
        # - If you hold the wlock, you should not be opening a transaction in
        #   the middle of a `dirstate.changing_*` block. The transaction needs
        #   to be open before that and wrap the change-context.
        #
        # - If you are not within a `dirstate.changing_*` context, why is our
        #   dirstate dirty?
        if self.dirstate._dirty:
            m = "cannot open a transaction with a dirty dirstate"
            raise error.ProgrammingError(m)

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = self.vfs_map
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current focus
        # is on the behavior of the feature so we disable it by default. The
        # flag will be removed when we are happy with the performance impact.
        #
        # Once this feature is no longer experimental move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tag were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                assert repo is not None  # help pytype
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            assert repo is not None  # help pytype

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                if singleheadsub.get(b"public-changes-only", False):
                    filtername = b"immutable"
                else:
                    filtername = b"visible"
                scmutil.enforcesinglehead(
                    repo, tr2, desc, accountclosed, filtername
                )
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args),
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args),
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            lambda: None,
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        for vfs_id, path in self._journalfiles():
            tr.add_journal(vfs_id, path)
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        tr.hookargs[b'changes'] = tr.changes
        # note: writing the fncache only during finalize means that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happens during the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run"""
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                assert repo is not None  # help pytype

                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args),
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(phases.preparehookargs(node, old, new))
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args),
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            repo = reporef()
            assert repo is not None  # help pytype
            repo._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if transaction is aborted"""
            repo = reporef()
            assert repo is not None  # help pytype
            repo.hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        # This only exists to deal with rollback's need to have viable
        # parents at the end of the operation. So back up viable parents at
        # the time of this operation.
        #
        # We only do it when the `wlock` is taken, otherwise others might be
        # altering the dirstate under us.
        #
        # This is really not a great way to do this (first, because we cannot
        # always do it). More viable alternatives exist:
        #
        # - backing up only the working copy parents in dedicated files and
        #   doing a clean "keep-update" to them on `hg rollback`.
        #
        # - slightly changing the behavior and applying logic similar to "hg
        #   strip" to pick a working copy destination on `hg rollback`
        if self.currentwlock() is not None:
            ds = self.dirstate
            if not self.vfs.exists(b'branch'):
                # force a file to be written if none exists
                ds.setbranch(b'default', None)

            def backup_dirstate(tr):
                for f in ds.all_file_names():
                    # hardlink backup is okay because `dirstate` is always
                    # atomically written and possible data files are append
                    # only and resistant to trailing data.
                    tr.addbackup(f, hardlink=True, location=b'plain')

            tr.addvalidator(b'dirstate-backup', backup_dirstate)
        return tr

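    # Usage sketch (illustrative; the transaction description is
    # hypothetical): the calling pattern implied by the locking check at the
    # top of ``transaction()``.
    #
    #   with repo.lock():
    #       with repo.transaction(b'my-operation') as tr:
    #           ...  # mutate the store; closed on success, aborted on error
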
    def _journalfiles(self):
        return (
            (self.svfs, b'journal'),
            (self.vfs, b'journal.desc'),
        )

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))

    def recover(self):
        with self.lock():
            if self.svfs.exists(b"journal"):
                self.ui.status(_(b"rolling back interrupted transaction\n"))
                vfsmap = self.vfs_map
                transaction.rollback(
                    self.svfs,
                    vfsmap,
                    b"journal",
                    self.ui.warn,
                    checkambigfiles=_cachedfiles,
                )
                self.invalidate()
                return True
            else:
                self.ui.warn(_(b"no interrupted transaction available\n"))
                return False

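    # Usage sketch (illustrative): ``recover()`` backs the ``hg recover``
    # command and only acts when an abandoned journal exists.
    #
    #   recovered = repo.recover()  # True if a journal was rolled back
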
2777 def rollback(self, dryrun=False, force=False):
2777 def rollback(self, dryrun=False, force=False):
2778 wlock = lock = None
2778 wlock = lock = None
2779 try:
2779 try:
2780 wlock = self.wlock()
2780 wlock = self.wlock()
2781 lock = self.lock()
2781 lock = self.lock()
2782 if self.svfs.exists(b"undo"):
2782 if self.svfs.exists(b"undo"):
2783 return self._rollback(dryrun, force)
2783 return self._rollback(dryrun, force)
2784 else:
2784 else:
2785 self.ui.warn(_(b"no rollback information available\n"))
2785 self.ui.warn(_(b"no rollback information available\n"))
2786 return 1
2786 return 1
2787 finally:
2787 finally:
2788 release(lock, wlock)
2788 release(lock, wlock)
2789
2789
2790 @unfilteredmethod # Until we get smarter cache management
2790 @unfilteredmethod # Until we get smarter cache management
2791 def _rollback(self, dryrun, force):
2791 def _rollback(self, dryrun, force):
2792 ui = self.ui
2792 ui = self.ui
2793
2793
2794 parents = self.dirstate.parents()
2794 parents = self.dirstate.parents()
2795 try:
2795 try:
2796 args = self.vfs.read(b'undo.desc').splitlines()
2796 args = self.vfs.read(b'undo.desc').splitlines()
2797 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2797 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2798 if len(args) >= 3:
2798 if len(args) >= 3:
2799 detail = args[2]
2799 detail = args[2]
2800 oldtip = oldlen - 1
2800 oldtip = oldlen - 1
2801
2801
2802 if detail and ui.verbose:
2802 if detail and ui.verbose:
2803 msg = _(
2803 msg = _(
2804 b'repository tip rolled back to revision %d'
2804 b'repository tip rolled back to revision %d'
2805 b' (undo %s: %s)\n'
2805 b' (undo %s: %s)\n'
2806 ) % (oldtip, desc, detail)
2806 ) % (oldtip, desc, detail)
2807 else:
2807 else:
2808 msg = _(
2808 msg = _(
2809 b'repository tip rolled back to revision %d (undo %s)\n'
2809 b'repository tip rolled back to revision %d (undo %s)\n'
2810 ) % (oldtip, desc)
2810 ) % (oldtip, desc)
2811 parentgone = any(self[p].rev() > oldtip for p in parents)
2811 parentgone = any(self[p].rev() > oldtip for p in parents)
2812 except IOError:
2812 except IOError:
2813 msg = _(b'rolling back unknown transaction\n')
2813 msg = _(b'rolling back unknown transaction\n')
2814 desc = None
2814 desc = None
2815 parentgone = True
2815 parentgone = True
2816
2816
2817 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2817 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2818 raise error.Abort(
2818 raise error.Abort(
2819 _(
2819 _(
2820 b'rollback of last commit while not checked out '
2820 b'rollback of last commit while not checked out '
2821 b'may lose data'
2821 b'may lose data'
2822 ),
2822 ),
2823 hint=_(b'use -f to force'),
2823 hint=_(b'use -f to force'),
2824 )
2824 )
2825
2825
2826 ui.status(msg)
2826 ui.status(msg)
2827 if dryrun:
2827 if dryrun:
2828 return 0
2828 return 0
2829
2829
2830 self.destroying()
2830 self.destroying()
2831 vfsmap = self.vfs_map
2831 vfsmap = self.vfs_map
2832 skip_journal_pattern = None
2832 skip_journal_pattern = None
2833 if not parentgone:
2833 if not parentgone:
2834 skip_journal_pattern = RE_SKIP_DIRSTATE_ROLLBACK
2834 skip_journal_pattern = RE_SKIP_DIRSTATE_ROLLBACK
2835 transaction.rollback(
2835 transaction.rollback(
2836 self.svfs,
2836 self.svfs,
2837 vfsmap,
2837 vfsmap,
2838 b'undo',
2838 b'undo',
2839 ui.warn,
2839 ui.warn,
2840 checkambigfiles=_cachedfiles,
2840 checkambigfiles=_cachedfiles,
2841 skip_journal_pattern=skip_journal_pattern,
2841 skip_journal_pattern=skip_journal_pattern,
2842 )
2842 )
2843 self.invalidate()
2843 self.invalidate()
2844 self.dirstate.invalidate()
2844 self.dirstate.invalidate()
2845
2845
2846 if parentgone:
2846 if parentgone:
2847 # replace this with some explicit parent update in the future.
2847 # replace this with some explicit parent update in the future.
2848 has_node = self.changelog.index.has_node
2848 has_node = self.changelog.index.has_node
2849 if not all(has_node(p) for p in self.dirstate._pl):
2849 if not all(has_node(p) for p in self.dirstate._pl):
2850 # There was no dirstate to backup initially, we need to drop
2850 # There was no dirstate to backup initially, we need to drop
2851 # the existing one.
2851 # the existing one.
2852 with self.dirstate.changing_parents(self):
2852 with self.dirstate.changing_parents(self):
2853 self.dirstate.setparents(self.nullid)
2853 self.dirstate.setparents(self.nullid)
2854 self.dirstate.clear()
2854 self.dirstate.clear()
2855
2855
2856 parents = tuple([p.rev() for p in self[None].parents()])
2856 parents = tuple([p.rev() for p in self[None].parents()])
2857 if len(parents) > 1:
2857 if len(parents) > 1:
2858 ui.status(
2858 ui.status(
2859 _(
2859 _(
2860 b'working directory now based on '
2860 b'working directory now based on '
2861 b'revisions %d and %d\n'
2861 b'revisions %d and %d\n'
2862 )
2862 )
2863 % parents
2863 % parents
2864 )
2864 )
2865 else:
2865 else:
2866 ui.status(
2866 ui.status(
2867 _(b'working directory now based on revision %d\n') % parents
2867 _(b'working directory now based on revision %d\n') % parents
2868 )
2868 )
2869 mergestatemod.mergestate.clean(self)
2869 mergestatemod.mergestate.clean(self)
2870
2870
2871 # TODO: if we know which new heads may result from this rollback, pass
2871 # TODO: if we know which new heads may result from this rollback, pass
2872 # them to destroy(), which will prevent the branchhead cache from being
2872 # them to destroy(), which will prevent the branchhead cache from being
2873 # invalidated.
2873 # invalidated.
2874 self.destroyed()
2874 self.destroyed()
2875 return 0
2875 return 0
2876
2876
    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)

        def updater(tr):
            repo = reporef()
            assert repo is not None  # help pytype
            repo.updatecaches(tr)

        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False, caches=None):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.

        The `full` argument can take a special "post-clone" value. In this case
        the cache warming is made after a clone and some of the slower caches
        might be skipped, namely the `.fnodetags` one. This argument is 5.8
        specific as we plan for a cleaner way to deal with this for 5.9.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        unfi = self.unfiltered()

        if caches is None:
            caches = repository.CACHES_DEFAULT

        if repository.CACHE_BRANCHMAP_SERVED in caches:
            if tr is None or tr.changes[b'origrepolen'] < len(self):
                self.ui.debug(b'updating the branch cache\n')
                dpt = repository.CACHE_BRANCHMAP_DETECT_PURE_TOPO in caches
                served = self.filtered(b'served')
                self._branchcaches.update_disk(served, detect_pure_topo=dpt)
                served_hidden = self.filtered(b'served.hidden')
                self._branchcaches.update_disk(
                    served_hidden, detect_pure_topo=dpt
                )

        if repository.CACHE_CHANGELOG_CACHE in caches:
            self.changelog.update_caches(transaction=tr)

        if repository.CACHE_MANIFESTLOG_CACHE in caches:
            self.manifestlog.update_caches(transaction=tr)
            for entry in self.store.walk():
                if not entry.is_revlog:
                    continue
                if not entry.is_manifestlog:
                    continue
                manifestrevlog = entry.get_revlog_instance(self).get_revlog()
                if manifestrevlog is not None:
                    manifestrevlog.update_caches(transaction=tr)

        if repository.CACHE_REV_BRANCH in caches:
            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

        if repository.CACHE_FULL_MANIFEST in caches:
            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

        if repository.CACHE_FILE_NODE_TAGS in caches:
            # accessing fnode cache warms the cache
            tagsmod.warm_cache(self)

        if repository.CACHE_TAGS_DEFAULT in caches:
            # accessing tags warms the cache
            self.tags()
        if repository.CACHE_TAGS_SERVED in caches:
            self.filtered(b'served').tags()

        if repository.CACHE_BRANCHMAP_ALL in caches:
            # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
            # so we're forcing a write to cause these caches to be warmed up
            # even if they haven't explicitly been requested yet (if they've
            # never been used by hg, they won't ever have been written, even if
            # they're a subset of another kind of cache that *has* been used).
            dpt = repository.CACHE_BRANCHMAP_DETECT_PURE_TOPO in caches

            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                self._branchcaches.update_disk(filtered, detect_pure_topo=dpt)

        # flush all possibly delayed writes.
        self._branchcaches.write_dirty(self)

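    # Usage sketch (illustrative, not part of the original source): callers
    # can warm a subset of caches by passing a collection of the CACHE_*
    # constants from `repository`; membership is tested with `in` above, so
    # any container works, e.g.:
    #
    #     repo.updatecaches(caches={repository.CACHE_TAGS_DEFAULT})
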
    def invalidatecaches(self):
        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        self._quick_access_changeid_invalidate()

    def invalidatedirstate(self):
        """Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state)."""
        unfi = self.unfiltered()
        if 'dirstate' in unfi.__dict__:
            assert not self.dirstate.is_changing_any
            del unfi.__dict__['dirstate']

    def invalidate(self, clearfilecache=False):
        """Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        """
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog.is_delaying
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                # XXX ideally, the key would be a unicode string to match the
                # fact it refers to an attribute name. However changing this
                # was a bit of scope creep compared to the series cleaning up
                # del/set/getattr, so we kept things simple here.
                delattr(unfiltered, pycompat.sysstr(k))
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        """Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes."""
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(
        self,
        vfs,
        lockname,
        wait,
        releasefn,
        acquirefn,
        desc,
    ):
        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint(b"ui", b"timeout")
            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
        sync_file = self.ui.config(b'devel', b'lock-wait-sync-file')
        if not sync_file:
            sync_file = None

        l = lockmod.trylock(
            self.ui,
            vfs,
            lockname,
            timeout,
            warntimeout,
            releasefn=releasefn,
            acquirefn=acquirefn,
            desc=desc,
            signalsafe=signalsafe,
            devel_wait_sync_file=sync_file,
        )
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else:  # no lock has been found.
            callback(True)

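    # Sketch of the callback contract documented above (the same pattern
    # appears in commit() and pushkey() below): register a callable taking a
    # single success flag; it runs once the outermost lock is released, or
    # immediately if no lock is held.
    #
    #     def runhook(unused_success):
    #         ...  # fire hooks, clean up, etc.
    #
    #     repo._afterlock(runhook)
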
    def lock(self, wait=True):
        """Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard."""
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        self.hook(b'prelock', throw=True)
        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        """Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard."""
        l = self._wlockref() if self._wlockref else None
        if l is not None and l.held:
            l.lock()
            return l

        self.hook(b'prewlock', throw=True)
        # We do not need to check for non-waiting lock acquisition. Such
        # acquisitions would not cause dead-lock, as they would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.is_changing_any:
                msg = b"wlock release in the middle of a changing parents"
                self.ui.develwarn(msg)
                self.dirstate.invalidate()
            else:
                if self.dirstate._dirty:
                    msg = b"dirty dirstate on wlock release"
                    self.ui.develwarn(msg)
                    self.dirstate.write(None)

            unfi = self.unfiltered()
            if 'dirstate' in unfi.__dict__:
                del unfi.__dict__['dirstate']

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
        )
        self._wlockref = weakref.ref(l)
        return l

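    # The lock-ordering rule from the two docstrings above, as a minimal
    # sketch: when both locks are needed, take 'wlock' before 'lock',
    # exactly as commit() does below.
    #
    #     with repo.wlock(), repo.lock():
    #         ...  # safe to modify both the working copy and the store
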
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def currentlock(self):
        """Returns the lock if it's held, or None if it's not."""
        return self._currentlock(self._lockref)

    def checkcommitpatterns(self, wctx, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == b'.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _(b'file not found!'))
                # Is it a directory that exists or used to exist?
                if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                    d = f + b'/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _(b"no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _(b"file not tracked!"))

    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=None,
        extra=None,
    ):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.InputError(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            ms = mergestatemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            # internal config: ui.allowemptycommit
            if cctx.isempty() and not self.ui.configbool(
                b'ui', b'allowemptycommit'
            ):
                self.ui.debug(b'nothing to commit, clearing merge state\n')
                ms.reset()
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msg_path = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msg_path
                    )
                    self.ui.write(
                        _(
                            b"note: use 'hg commit --logfile "
                            b"%s --edit' to reuse it\n"
                        )
                        % msg_path
                    )
                raise

        def commithook(unused_success):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may get stripped before the hook is released
            if self.changelog.hasnode(ret):
                self.hook(
                    b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                )

        self._afterlock(commithook)
        return ret

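    # Hedged usage sketch (argument values are illustrative): a plain commit
    # of all working-directory changes only needs the keyword arguments
    # defined above, and returns the new changeset node, or None when there
    # is nothing to commit.
    #
    #     node = repo.commit(text=b'fix parser bug', user=b'alice <a@b.c>')
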
    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        return commit.commitctx(self, ctx, error=error, origctx=origctx)

    @unfilteredmethod
    def destroying(self):
        """Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated, causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        """
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write(self)

    @unfilteredmethod
    def destroyed(self):
        """Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        """
        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(
        self,
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(
            node2, match, ignored, clean, unknown, listsubrepos
        )

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the
        wlock couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        """return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        """
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != self.nullid or p[0] == self.nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != self.nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

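    # Reading aid (not part of the original source): for each (top, bottom)
    # pair, the loop above follows first parents from top toward bottom and
    # samples nodes at exponentially growing distances 1, 2, 4, 8, ...
    # (a node is recorded whenever the step counter i equals f, which then
    # doubles). These sparse samples let the legacy wire protocol narrow down
    # common ancestors without transferring every intermediate node.
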
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of a pushop with repo, remote, and
        outgoing methods, which are called before pushing changesets.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook(unused_success):
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        self._afterlock(runhook)
        return ret

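    # Hedged usage sketch: pushkey namespaces include b'bookmarks' and
    # b'phases'; moving a bookmark through this interface looks roughly like
    # the following (old/new are hex node strings, b'' meaning "absent"):
    #
    #     ok = repo.pushkey(b'bookmarks', b'mybook', b'', hex(newnode))
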
    def listkeys(self, namespace):
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def savecommitmessage(self, text):
        fp = self.vfs(b'last-message.txt', b'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1 :])

    def register_wanted_sidedata(self, category):
        if repository.REPO_FEATURE_SIDE_DATA not in self.features:
            # Only revlogv2 repos can want sidedata.
            return
        self._wanted_sidedata.add(pycompat.bytestr(category))

    def register_sidedata_computer(
        self, kind, category, keys, computer, flags, replace=False
    ):
        if kind not in revlogconst.ALL_KINDS:
            msg = _(b"unexpected revlog kind '%s'.")
            raise error.ProgrammingError(msg % kind)
        category = pycompat.bytestr(category)
        already_registered = category in self._sidedata_computers.get(kind, [])
        if already_registered and not replace:
            msg = _(
                b"cannot register a sidedata computer twice for category '%s'."
            )
            raise error.ProgrammingError(msg % category)
        if replace and not already_registered:
            msg = _(
                b"cannot replace a sidedata computer that isn't registered "
                b"for category '%s'."
            )
            raise error.ProgrammingError(msg % category)
        self._sidedata_computers.setdefault(kind, {})
        self._sidedata_computers[kind][category] = (keys, computer, flags)


def undoname(fn: bytes) -> bytes:
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))


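# For example, undoname(b'.hg/store/journal.bookmarks') returns
# b'.hg/store/undo.bookmarks': only the leading 'journal' prefix of the
# base name is rewritten, thanks to the count argument of replace().

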
def instance(ui, path: bytes, create, intents=None, createopts=None):
    # prevent cyclic import localrepo -> upgrade -> localrepo
    from . import upgrade

    localpath = urlutil.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    def repo_maker():
        return makelocalrepository(ui, localpath, intents=intents)

    repo = repo_maker()
    repo = upgrade.may_auto_upgrade(repo, repo_maker)
    return repo


def islocal(path: bytes) -> bool:
    return True


def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts


def clone_requirements(ui, createopts, srcrepo):
    """clone the requirements of a local repo for a local clone

    The store requirements are unchanged, while the working copy requirements
    depend on the configuration.
    """
    target_requirements = set()
    if not srcrepo.requirements:
        # this is a legacy revlog "v0" repository, we cannot do anything fancy
        # with it.
        return target_requirements
    createopts = defaultcreateopts(ui, createopts=createopts)
    for r in newreporequirements(ui, createopts):
        if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
            target_requirements.add(r)

    for r in srcrepo.requirements:
        if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
            target_requirements.add(r)
    return target_requirements


def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
    if ui.configbool(b'format', b'usestore'):
        requirements.add(requirementsmod.STORE_REQUIREMENT)
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)

    compengines = ui.configlist(b'format', b'revlog-compression')
    for compengine in compengines:
        if compengine in util.compengines:
            engine = util.compengines[compengine]
            if engine.available() and engine.revlogheader():
                break
    else:
        raise error.Abort(
            _(
                b'compression engines %s defined by '
                b'format.revlog-compression not available'
            )
            % b', '.join(b'"%s"' % e for e in compengines),
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

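    # Worked example of the mapping above (illustrative): with
    # `format.revlog-compression = zstd, zlib`, the first listed engine that
    # is available and revlog-capable wins. zstd adds the
    # `revlog-compression-zstd` requirement, zlib adds nothing because it is
    # the implicit historical default, and any other engine is recorded as
    # `exp-compression-<name>`.
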
    if scmutil.gdinitconfig(ui):
        requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.use-dirstate-v2
    # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
    if ui.configbool(b'format', b'use-dirstate-v2'):
        requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)

    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
        requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)

    changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
    if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'use-internal-phase'):
        requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)

    # experimental config: format.exp-archived-phase
    if ui.configbool(b'format', b'exp-archived-phase'):
        requirements.add(requirementsmod.ARCHIVED_PHASE_REQUIREMENT)

    if createopts.get(b'narrowfiles'):
        requirements.add(requirementsmod.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)

    # The feature is disabled unless a fast implementation is available.
    persistent_nodemap_default = policy.importrust('revlog') is not None
    if ui.configbool(
        b'format', b'use-persistent-nodemap', persistent_nodemap_default
    ):
        requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3772 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3773
3773
3774 # if share-safe is enabled, let's create the new repository with the new
3774 # if share-safe is enabled, let's create the new repository with the new
3775 # requirement
3775 # requirement
3776 if ui.configbool(b'format', b'use-share-safe'):
3776 if ui.configbool(b'format', b'use-share-safe'):
3777 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3777 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3778
3778
    # if we are creating a share-repo¹ we have to handle requirements
    # differently.
    #
    # [1] (i.e. reusing the store from another repository, just having a
    # working copy)
    if b'sharedrepo' in createopts:
        source_requirements = set(createopts[b'sharedrepo'].requirements)

        if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
            # share to an old school repository, we have to copy the
            # requirements and hope for the best.
            requirements = source_requirements
        else:
            # We have control on the working copy only, so "copy" the non
            # working copy part over, ignoring previous logic.
            to_drop = set()
            for req in requirements:
                if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
                    continue
                if req in source_requirements:
                    continue
                to_drop.add(req)
            requirements -= to_drop
            requirements |= source_requirements

        if createopts.get(b'sharedrelative'):
            requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
        else:
            requirements.add(requirementsmod.SHARED_REQUIREMENT)

    if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
        version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
        msg = _(b"ignoring unknown tracked key version: %d\n")
        hint = _(
            b"see `hg help config.format.use-dirstate-tracked-hint-version"
        )
        if version != 1:
            ui.warn(msg % version, hint=hint)
        else:
            requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)

    return requirements


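# A minimal usage sketch (assumed caller; it mirrors what createrepository()
# below actually does): the helpers here form a pipeline that first computes
# the requirement set for the configured formats, then subtracts anything
# checkrequirementscompat() flags as incompatible.
#
#   opts = defaultcreateopts(ui)
#   reqs = newreporequirements(ui, createopts=opts)
#   reqs -= checkrequirementscompat(ui, reqs)

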
def checkrequirementscompat(ui, requirements):
    """Checks compatibility of repository requirements enabled and disabled.

    Returns a set of requirements which need to be dropped because dependent
    requirements are not enabled. Also warns users about it."""

    dropped = set()

    if requirementsmod.STORE_REQUIREMENT not in requirements:
        if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b'ignoring enabled \'format.bookmarks-in-store\' config '
                    b'because it is incompatible with disabled '
                    b'\'format.usestore\' config\n'
                )
            )
            dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)

        if (
            requirementsmod.SHARED_REQUIREMENT in requirements
            or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
        ):
            raise error.Abort(
                _(
                    b"cannot create shared repository as source was created"
                    b" with 'format.usestore' config disabled"
                )
            )

        if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
            if ui.hasconfig(b'format', b'use-share-safe'):
                msg = _(
                    b"ignoring enabled 'format.use-share-safe' config because "
                    b"it is incompatible with disabled 'format.usestore'"
                    b" config\n"
                )
                ui.warn(msg)
            dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return dropped


def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle, as sketched below.
    """
    known = {
        b'backend',
        b'lfs',
        b'narrowfiles',
        b'sharedrepo',
        b'sharedrelative',
        b'shareditems',
        b'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}
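
# Sketch of the wrapping the docstring above describes (assumed extension
# code, not part of this module; b'myextopt' is a hypothetical option the
# extension claims to handle):
#
#   def _filtered(orig, ui, createopts):
#       unknown = orig(ui, createopts)
#       unknown.pop(b'myextopt', None)  # claim responsibility for the option
#       return unknown
#
#   extensions.wrapfunction(localrepo, 'filterknowncreateopts', _filtered)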


def createrepository(ui, path: bytes, createopts=None, requirements=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.
    ``requirements`` predefined set of requirements.
        (incompatible with ``createopts``)

    The following keys for ``createopts`` are recognized:

    backend
       The storage backend to use.
    lfs
       Repository will be created with ``lfs`` requirement. The lfs extension
       will automatically be loaded when the repository is accessed.
    narrowfiles
       Set up repository to support narrow file storage.
    sharedrepo
       Repository object from which storage should be shared.
    sharedrelative
       Boolean indicating if the path to the shared repo should be
       stored as relative. By default, the pointer to the "parent" repo
       is stored as an absolute path.
    shareditems
       Set of items to share to the new repository (in addition to storage).
    shallowfilestore
       Indicates that storage for files should be shallow (not all ancestor
       revisions are known).
    """

    if requirements is not None:
        if createopts is not None:
            msg = b'cannot specify both createopts and requirements'
            raise error.ProgrammingError(msg)
        createopts = {}
    else:
        createopts = defaultcreateopts(ui, createopts=createopts)

        unknownopts = filterknowncreateopts(ui, createopts)

        if not isinstance(unknownopts, dict):
            raise error.ProgrammingError(
                b'filterknowncreateopts() did not return a dict'
            )

        if unknownopts:
            raise error.Abort(
                _(
                    b'unable to create repository because of unknown '
                    b'creation option: %s'
                )
                % b', '.join(sorted(unknownopts)),
                hint=_(b'is a required extension not loaded?'),
            )

        requirements = newreporequirements(ui, createopts=createopts)
        requirements -= checkrequirementscompat(ui, requirements)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_(b'repository %s already exists') % path)

    if b'sharedrepo' in createopts:
        sharedpath = createopts[b'sharedrepo'].sharedpath

        if createopts.get(b'sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
                sharedpath = util.pconvert(sharedpath)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
    hgvfs.mkdir(b'wcache')

    has_store = requirementsmod.STORE_REQUIREMENT in requirements
    if has_store and b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 65535, which won't be recognized by
        # such old clients.
        hgvfs.append(
            b'00changelog.i',
            b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
            b'layout',
        )

    # Filter the requirements into working copy and store ones
    wcreq, storereq = scmutil.filterrequirements(requirements)
    # write working copy ones
    scmutil.writerequires(hgvfs, wcreq)
    # If there are store requirements and the current repository is not a
    # shared one, write the store requirements. For a new shared repository,
    # we don't need to write them, as they are already present in the
    # source's store requires file.
    if storereq and b'sharedrepo' not in createopts:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
        scmutil.writerequires(storevfs, storereq)

    # Write out file telling readers where to find the shared store.
    if b'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get(b'shareditems'):
        shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)


def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # has all attribute lookups result in error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository:
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError(
                b'repo instances should not be used after unshare'
            )

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, '__class__', poisonedrepository)
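
# Illustration of the post-condition (assumed caller): after poisoning, only
# close() remains callable; any other attribute access raises.
#
#   poisonrepository(repo)
#   repo.close()      # permitted, a no-op on the poisoned instance
#   repo.changelog    # raises error.ProgrammingError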
@@ -1,666 +1,665
# wireprotov1peer.py - Client-side functionality for wire protocol version 1.
#
# Copyright 2005-2010 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import annotations

import sys
import weakref

from concurrent import futures
from .i18n import _
from .node import bin
from . import (
    bundle2,
    changegroup as changegroupmod,
    encoding,
    error,
    pushkey as pushkeymod,
    pycompat,
    util,
    wireprototypes,
)
from .interfaces import (
    repository,
    util as interfaceutil,
)
from .utils import hashutil

urlreq = util.urlreq


def batchable(f):
    """annotation for batchable methods

    Such methods must implement a coroutine as follows:

    @batchable
    def sample(self, one, two=None):
        # Build list of encoded arguments suitable for your wire protocol:
        encoded_args = [('one', encode(one),), ('two', encode(two),)]
        # Return it, along with a function that will receive the result
        # from the batched request.
        return encoded_args, decode

    The decorator returns a function which wraps this coroutine as a plain
    method, but adds the original method as an attribute called "batchable",
    which is used by remotebatch to split the call into separate encoding and
    decoding phases.
    """

    def plain(*args, **opts):
        encoded_args_or_res, decode = f(*args, **opts)
        if not decode:
            return encoded_args_or_res  # a local result in this case
        self = args[0]
        cmd = pycompat.bytesurl(f.__name__)  # ensure cmd is ascii bytestr
        encoded_res = self._submitone(cmd, encoded_args_or_res)
        return decode(encoded_res)

    setattr(plain, 'batchable', f)
    setattr(plain, '__name__', f.__name__)
    return plain


def encodebatchcmds(req):
    """Return a ``cmds`` argument value for the ``batch`` command."""
    escapearg = wireprototypes.escapebatcharg

    cmds = []
    for op, argsdict in req:
        # Old servers didn't properly unescape argument names. So prevent
        # the sending of argument names that may not be decoded properly by
        # servers.
        assert all(escapearg(k) == k for k in argsdict)

        args = b','.join(
            b'%s=%s' % (escapearg(k), escapearg(v)) for k, v in argsdict.items()
        )
        cmds.append(b'%s %s' % (op, args))

    return b';'.join(cmds)
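
# Worked example (hedged; the command names are real wire commands, the
# argument values are invented): a request list such as
#
#   [(b'heads', {}), (b'listkeys', {b'namespace': b'phases'})]
#
# encodes to b'heads ;listkeys namespace=phases', with any special bytes in
# values escaped by wireprototypes.escapebatcharg().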


class unsentfuture(futures.Future):
    """A Future variation to represent an unsent command.

    Because we buffer commands and don't submit them immediately, calling
    ``result()`` on an unsent future could deadlock. Futures for buffered
    commands are represented by this type, which wraps ``result()`` to
    call ``sendcommands()``.
    """

    _peerexecutor: "peerexecutor"

    def result(self, timeout=None):
        if self.done():
            return futures.Future.result(self, timeout)

        self._peerexecutor.sendcommands()

        # This looks like it will infinitely recurse. However,
        # sendcommands() should modify __class__. This call serves as a check
        # on that.
        return self.result(timeout)


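# Typical driver of the executor below (a sketch; `peer` and `node` are
# assumed to exist). Batchable calls are buffered until the context manager
# exits or a result is requested:
#
#   with peer.commandexecutor() as e:
#       f1 = e.callcommand(b'heads', {})
#       f2 = e.callcommand(b'known', {b'nodes': [node]})
#   heads = f1.result()
#   known = f2.result()

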
class peerexecutor(repository.ipeercommandexecutor):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False
        self._calls = []
        self._futures = weakref.WeakSet()
        self._responseexecutor = None
        self._responsef = None

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after commands are sent'
            )

        if self._closed:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after close()'
            )

        # Commands are dispatched through methods on the peer.
        fn = getattr(self._peer, pycompat.sysstr(command), None)

        if not fn:
            raise error.ProgrammingError(
                b'cannot call command %s: method of same name not available '
                b'on peer' % command
            )

        # Commands are either batchable or they aren't. If a command
        # isn't batchable, we send it immediately because the executor
        # can no longer accept new commands after a non-batchable command.
        # If a command is batchable, we queue it for later. But we have
        # to account for the case of a non-batchable command arriving after
        # a batchable one and refuse to service it.

        def addcall():
            f = futures.Future()
            self._futures.add(f)
            self._calls.append((command, args, fn, f))
            return f

        if getattr(fn, 'batchable', False):
            f = addcall()

            # But since we don't issue it immediately, we wrap its result()
            # to trigger sending so we avoid deadlocks.
            f.__class__ = unsentfuture
            f._peerexecutor = self
        else:
            if self._calls:
                raise error.ProgrammingError(
                    b'%s is not batchable and cannot be called on a command '
                    b'executor along with other commands' % command
                )

            f = addcall()

            # Non-batchable commands can never coexist with another command
            # in this executor. So send the command immediately.
            self.sendcommands()

        return f

    def sendcommands(self):
        if self._sent:
            return

        if not self._calls:
            return

        self._sent = True

        # Unhack any future types so the caller sees a clean type, and to
        # break the cycle between us and the futures.
        for f in self._futures:
            if isinstance(f, unsentfuture):
                f.__class__ = futures.Future
                f._peerexecutor = None

        calls = self._calls
        # Mainly to destroy references to futures.
        self._calls = None

        # Simple case of a single command. We call it synchronously.
        if len(calls) == 1:
            command, args, fn, f = calls[0]

            # Future was cancelled. Ignore it.
            if not f.set_running_or_notify_cancel():
                return

            try:
                result = fn(**pycompat.strkwargs(args))
            except Exception:
                pycompat.future_set_exception_info(f, sys.exc_info()[1:])
            else:
                f.set_result(result)

            return

        # Batch commands are a bit harder. First, we have to deal with the
        # @batchable coroutine. That's a bit annoying. Furthermore, we also
        # need to preserve streaming. i.e. it should be possible for the
        # futures to resolve as data is coming in off the wire without having
        # to wait for the final byte of the final response. We do this by
        # spinning up a thread to read the responses.

        requests = []
        states = []

        for command, args, fn, f in calls:
            # Future was cancelled. Ignore it.
            if not f.set_running_or_notify_cancel():
                continue

            try:
                encoded_args_or_res, decode = fn.batchable(
                    fn.__self__, **pycompat.strkwargs(args)
                )
            except Exception:
                pycompat.future_set_exception_info(f, sys.exc_info()[1:])
                return

            if not decode:
                f.set_result(encoded_args_or_res)
            else:
                requests.append((command, encoded_args_or_res))
                states.append((command, f, batchable, decode))

        if not requests:
            return

        # This will emit responses in order they were executed.
        wireresults = self._peer._submitbatch(requests)

        # The use of a thread pool executor here is a bit weird for something
        # that only spins up a single thread. However, thread management is
        # hard and it is easy to encounter race conditions, deadlocks, etc.
        # concurrent.futures already solves these problems and its thread pool
        # executor has minimal overhead. So we use it.
        self._responseexecutor = futures.ThreadPoolExecutor(1)
        self._responsef = self._responseexecutor.submit(
            self._readbatchresponse, states, wireresults
        )

    def close(self):
        self.sendcommands()

        if self._closed:
            return

        self._closed = True

        if not self._responsef:
            return

        # We need to wait on our in-flight response and then shut down the
        # executor once we have a result.
        try:
            self._responsef.result()
        finally:
            # Help pytype- this is initialized by self.sendcommands(), called
            # above.
            assert self._responseexecutor is not None
            self._responseexecutor.shutdown(wait=True)
            self._responsef = None
            self._responseexecutor = None

            # If any of our futures are still in progress, mark them as
            # errored. Otherwise a result() could wait indefinitely.
            for f in self._futures:
                if not f.done():
                    f.set_exception(
                        error.ResponseError(
                            _(b'unfulfilled batch command response'), None
                        )
                    )

            self._futures = None

    def _readbatchresponse(self, states, wireresults):
        # Executes in a thread to read data off the wire.

        for command, f, batchable, decode in states:
            # Grab raw result off the wire and teach the internal future
            # about it.
            try:
                remoteresult = next(wireresults)
            except StopIteration:
                # This can happen in particular because next(batchable)
                # in the previous iteration can call peer._abort, which
                # may close the peer.
                f.set_exception(
                    error.ResponseError(
                        _(b'unfulfilled batch command response'), None
                    )
                )
            else:
                try:
                    result = decode(remoteresult)
                except Exception:
                    pycompat.future_set_exception_info(f, sys.exc_info()[1:])
                else:
                    f.set_result(result)


@interfaceutil.implementer(
    repository.ipeercommands, repository.ipeerlegacycommands
)
class wirepeer(repository.peer):
    """Client-side interface for communicating with a peer repository.

    Methods commonly call wire protocol commands of the same name.

    See also httppeer.py and sshpeer.py for protocol-specific
    implementations of this interface.
    """

    def commandexecutor(self):
        return peerexecutor(self)

    # Begin of ipeercommands interface.

    def clonebundles(self):
        if self.capable(b'clonebundles_manifest'):
            return self._call(b'clonebundles_manifest')
        else:
            self.requirecap(b'clonebundles', _(b'clone bundles'))
            return self._call(b'clonebundles')

    def _finish_inline_clone_bundle(self, stream):
        pass  # allow override for httppeer

    def get_cached_bundle_inline(self, path):
        stream = self._callstream(b"get_cached_bundle_inline", path=path)
        length = util.uvarintdecodestream(stream)

        # SSH streams will block if reading more than length
        for chunk in util.filechunkiter(stream, limit=length):
            yield chunk

        self._finish_inline_clone_bundle(stream)

    @batchable
    def lookup(self, key):
        self.requirecap(b'lookup', _(b'look up remote revision'))

        def decode(d):
            success, data = d[:-1].split(b" ", 1)
            if int(success):
                return bin(data)
            else:
                self._abort(error.RepoError(data))

        return {b'key': encoding.fromlocal(key)}, decode
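
    # Response shape consumed by decode() above (a hedged reconstruction from
    # the parsing alone): b'1 <hex nodeid>\n' on success, b'0 <message>\n' on
    # failure; d[:-1] strips the trailing newline before the split.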

    @batchable
    def heads(self):
        def decode(d):
            try:
                return wireprototypes.decodelist(d[:-1])
            except ValueError:
                self._abort(error.ResponseError(_(b"unexpected response:"), d))

        return {}, decode

    @batchable
    def known(self, nodes):
        def decode(d):
            try:
                return [bool(int(b)) for b in pycompat.iterbytestr(d)]
            except ValueError:
                self._abort(error.ResponseError(_(b"unexpected response:"), d))

        return {b'nodes': wireprototypes.encodelist(nodes)}, decode

    @batchable
    def branchmap(self):
        def decode(d):
            try:
                branchmap = {}
                for branchpart in d.splitlines():
                    branchname, branchheads = branchpart.split(b' ', 1)
                    branchname = encoding.tolocal(urlreq.unquote(branchname))
                    branchheads = wireprototypes.decodelist(branchheads)
                    branchmap[branchname] = branchheads
                return branchmap
            except TypeError:
                self._abort(error.ResponseError(_(b"unexpected response:"), d))

        return {}, decode

    @batchable
    def listkeys(self, namespace):
        if not self.capable(b'pushkey'):
            return {}, None
        self.ui.debug(b'preparing listkeys for "%s"\n' % namespace)

        def decode(d):
            self.ui.debug(
                b'received listkey for "%s": %i bytes\n' % (namespace, len(d))
            )
            return pushkeymod.decodekeys(d)

        return {b'namespace': encoding.fromlocal(namespace)}, decode

    @batchable
    def pushkey(self, namespace, key, old, new):
        if not self.capable(b'pushkey'):
            return False, None
        self.ui.debug(b'preparing pushkey for "%s:%s"\n' % (namespace, key))

        def decode(d):
            d, output = d.split(b'\n', 1)
            try:
                d = bool(int(d))
            except ValueError:
                raise error.ResponseError(
                    _(b'push failed (unexpected response):'), d
                )
            for l in output.splitlines(True):
                self.ui.status(_(b'remote: '), l)
            return d

        return {
            b'namespace': encoding.fromlocal(namespace),
            b'key': encoding.fromlocal(key),
            b'old': encoding.fromlocal(old),
            b'new': encoding.fromlocal(new),
        }, decode

    def stream_out(self):
        return self._callstream(b'stream_out')

    def getbundle(self, source, **kwargs):
        kwargs = pycompat.byteskwargs(kwargs)
        self.requirecap(b'getbundle', _(b'look up remote changes'))
        opts = {}
        bundlecaps = kwargs.get(b'bundlecaps') or set()
        for key, value in kwargs.items():
            if value is None:
                continue
            keytype = wireprototypes.GETBUNDLE_ARGUMENTS.get(key)
            if keytype is None:
                raise error.ProgrammingError(
                    b'Unexpectedly None keytype for key %s' % key
                )
            elif keytype == b'nodes':
                value = wireprototypes.encodelist(value)
            elif keytype == b'csv':
                value = b','.join(value)
            elif keytype == b'scsv':
                value = b','.join(sorted(value))
            elif keytype == b'boolean':
                value = b'%i' % bool(value)
            elif keytype != b'plain':
                raise KeyError(b'unknown getbundle option type %s' % keytype)
            opts[key] = value
        f = self._callcompressable(b"getbundle", **pycompat.strkwargs(opts))
        if any((cap.startswith(b'HG2') for cap in bundlecaps)):
            return bundle2.getunbundler(self.ui, f)
        else:
            return changegroupmod.cg1unpacker(f, b'UN')
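
    # Encoding examples for the keytype dispatch above (hedged; the values
    # are invented, the rules come straight from the code):
    #   b'boolean'  True             -> b'1'
    #   b'csv'      [b'a', b'b']     -> b'a,b'
    #   b'scsv'     {b'b', b'a'}     -> b'a,b' (sorted first)
    #   b'nodes'    binary node ids  -> hex forms joined by b' '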

    def unbundle(self, bundle, heads, url):
        """Send cg (a readable file-like object representing the
        changegroup to push, typically a chunkbuffer object) to the
        remote server as a bundle.

        When pushing a bundle10 stream, return an integer indicating the
        result of the push (see changegroup.apply()).

        When pushing a bundle20 stream, return a bundle20 stream.

        `url` is the url the client thinks it's pushing to, which is
        visible to hooks.
        """

        if heads != [b'force'] and self.capable(b'unbundlehash'):
            heads = wireprototypes.encodelist(
                [b'hashed', hashutil.sha1(b''.join(sorted(heads))).digest()]
            )
        else:
            heads = wireprototypes.encodelist(heads)

        if hasattr(bundle, 'deltaheader'):
            # this is a bundle10, do the old-style call sequence
            ret, output = self._callpush(b"unbundle", bundle, heads=heads)
            if ret == b"":
                raise error.ResponseError(_(b'push failed:'), output)
            try:
                ret = int(ret)
            except ValueError:
                raise error.ResponseError(
                    _(b'push failed (unexpected response):'), ret
                )

            for l in output.splitlines(True):
                self.ui.status(_(b'remote: '), l)
        else:
            # bundle2 push. Send a stream, fetch a stream.
            stream = self._calltwowaystream(b'unbundle', bundle, heads=heads)
            ret = bundle2.getunbundler(self.ui, stream)
        return ret

    # End of ipeercommands interface.

    # Begin of ipeerlegacycommands interface.

    def branches(self, nodes):
        n = wireprototypes.encodelist(nodes)
        d = self._call(b"branches", nodes=n)
        try:
            br = [tuple(wireprototypes.decodelist(b)) for b in d.splitlines()]
            return br
        except ValueError:
            self._abort(error.ResponseError(_(b"unexpected response:"), d))

    def between(self, pairs):
        batch = 8  # avoid giant requests
        r = []
        for i in range(0, len(pairs), batch):
            n = b" ".join(
                [
                    wireprototypes.encodelist(p, b'-')
                    for p in pairs[i : i + batch]
                ]
            )
            d = self._call(b"between", pairs=n)
            try:
                r.extend(
                    l and wireprototypes.decodelist(l) or []
                    for l in d.splitlines()
                )
            except ValueError:
                self._abort(error.ResponseError(_(b"unexpected response:"), d))
        return r
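
    # Wire encoding used above (hedged illustration): each pair is sent as
    # b'<hex old>-<hex new>' via encodelist(p, b'-'), pairs are joined with
    # b' ', and at most 8 pairs go into a single "between" request.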
556
555
557 def changegroup(self, nodes, source):
556 def changegroup(self, nodes, source):
558 n = wireprototypes.encodelist(nodes)
557 n = wireprototypes.encodelist(nodes)
559 f = self._callcompressable(b"changegroup", roots=n)
558 f = self._callcompressable(b"changegroup", roots=n)
560 return changegroupmod.cg1unpacker(f, b'UN')
559 return changegroupmod.cg1unpacker(f, b'UN')
561
560
562 def changegroupsubset(self, bases, heads, source):
561 def changegroupsubset(self, bases, heads, source):
563 self.requirecap(b'changegroupsubset', _(b'look up remote changes'))
562 self.requirecap(b'changegroupsubset', _(b'look up remote changes'))
564 bases = wireprototypes.encodelist(bases)
563 bases = wireprototypes.encodelist(bases)
565 heads = wireprototypes.encodelist(heads)
564 heads = wireprototypes.encodelist(heads)
566 f = self._callcompressable(
565 f = self._callcompressable(
567 b"changegroupsubset", bases=bases, heads=heads
566 b"changegroupsubset", bases=bases, heads=heads
568 )
567 )
569 return changegroupmod.cg1unpacker(f, b'UN')
568 return changegroupmod.cg1unpacker(f, b'UN')
570
569
571 # End of ipeerlegacycommands interface.
570 # End of ipeerlegacycommands interface.
572
571
    def _submitbatch(self, req):
        """run batch request <req> on the server

        Returns an iterator of the raw responses from the server.
        """
        ui = self.ui
        if ui.debugflag and ui.configbool(b'devel', b'debug.peer-request'):
            ui.debug(b'devel-peer-request: batched-content\n')
            for op, args in req:
                msg = b'devel-peer-request: - %s (%d arguments)\n'
                ui.debug(msg % (op, len(args)))

        unescapearg = wireprototypes.unescapebatcharg

        rsp = self._callstream(b"batch", cmds=encodebatchcmds(req))
        chunk = rsp.read(1024)
        work = [chunk]
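        # The batch reply is one stream of b';'-separated, escaped response
        # payloads (unescapebatcharg reverses the escaping). Read it in
        # 1 KiB chunks, buffering until a separator shows up, and yield each
        # complete response as soon as it is available instead of waiting
        # for the full reply.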
        while chunk:
            while b';' not in chunk and chunk:
                chunk = rsp.read(1024)
                work.append(chunk)
            merged = b''.join(work)
            while b';' in merged:
                one, merged = merged.split(b';', 1)
                yield unescapearg(one)
            chunk = rsp.read(1024)
            work = [merged, chunk]
        yield unescapearg(b''.join(work))

    def _submitone(self, op, args):
        return self._call(op, **pycompat.strkwargs(args))

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        # don't pass optional arguments left at their default value
        opts = {}
        if three is not None:
            opts['three'] = three
        if four is not None:
            opts['four'] = four
        return self._call(b'debugwireargs', one=one, two=two, **opts)

    def _call(self, cmd, **args):
        """execute <cmd> on the server

        The command is expected to return a simple string.

        Returns the server reply as a string."""
        raise NotImplementedError()

    def _callstream(self, cmd, **args):
        """execute <cmd> on the server

        The command is expected to return a stream. Note that if the
        command doesn't return a stream, _callstream behaves
        differently for ssh and http peers.

        Returns the server reply as a file-like object.
        """
        raise NotImplementedError()

    def _callcompressable(self, cmd, **args):
        """execute <cmd> on the server

        The command is expected to return a stream.

        The stream may have been compressed in some implementations. This
        function takes care of the decompression. This is the only
        difference from _callstream.

        Returns the server reply as a file-like object.
        """
        raise NotImplementedError()

    def _callpush(self, cmd, fp, **args):
        """execute <cmd> on the server

        The command is expected to be related to a push, which uses a
        special return protocol.

        Returns the server reply as a (ret, output) tuple. ret is either
        empty (error) or a stringified int.
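
        For example, a successful legacy push might yield something like
        ``(b'1', b'adding changesets\n')`` (illustrative values only).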
654 """
653 """
655 raise NotImplementedError()
654 raise NotImplementedError()
656
655
    def _calltwowaystream(self, cmd, fp, **args):
        """execute <cmd> on the server

        The command will send a stream to the server and get a stream
        in reply.
        """
        raise NotImplementedError()

    def _abort(self, exception):
        """cleanly abort the wire protocol connection and raise the exception"""
        raise NotImplementedError()