undo-files: factor the vfs map in a repository property...
marmoute
r51189:f3488731 stable
@@ -1,2058 +1,2062 b''
# repository.py - Interfaces and base classes for repositories and peers.
# coding: utf-8
#
# Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.


from ..i18n import _
from .. import error
from . import util as interfaceutil

# Local repository feature string.

# Revlogs are being used for file storage.
REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
# The storage part of the repository is shared from an external source.
REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
# LFS supported for backing file storage.
REPO_FEATURE_LFS = b'lfs'
# Repository supports being stream cloned.
REPO_FEATURE_STREAM_CLONE = b'streamclone'
# Repository supports (at least) some sidedata to be stored
REPO_FEATURE_SIDE_DATA = b'side-data'
# Files storage may lack data for all ancestors.
REPO_FEATURE_SHALLOW_FILE_STORAGE = b'shallowfilestorage'

REVISION_FLAG_CENSORED = 1 << 15
REVISION_FLAG_ELLIPSIS = 1 << 14
REVISION_FLAG_EXTSTORED = 1 << 13
REVISION_FLAG_HASCOPIESINFO = 1 << 12

REVISION_FLAGS_KNOWN = (
    REVISION_FLAG_CENSORED
    | REVISION_FLAG_ELLIPSIS
    | REVISION_FLAG_EXTSTORED
    | REVISION_FLAG_HASCOPIESINFO
)
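
# A minimal sketch (not part of the interface) of how this mask is typically
# used when validating flags; ``flags`` here stands for a hypothetical
# 2-byte flags value read from an index entry:
#
#     if flags & ~REVISION_FLAGS_KNOWN:
#         raise error.Abort(b'unknown revision flags set on revision')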

CG_DELTAMODE_STD = b'default'
CG_DELTAMODE_PREV = b'previous'
CG_DELTAMODE_FULL = b'fulltext'
CG_DELTAMODE_P1 = b'p1'


## Cache related constants:
#
# Used to control which cache should be warmed in a repo.updatecaches(…) call.

# Warm branchmaps of all known repoview's filter-level
CACHE_BRANCHMAP_ALL = b"branchmap-all"
# Warm branchmaps of repoview's filter-level used by server
CACHE_BRANCHMAP_SERVED = b"branchmap-served"
# Warm internal changelog cache (eg: persistent nodemap)
CACHE_CHANGELOG_CACHE = b"changelog-cache"
# Warm full manifest cache
CACHE_FULL_MANIFEST = b"full-manifest"
# Warm file-node-tags cache
CACHE_FILE_NODE_TAGS = b"file-node-tags"
# Warm internal manifestlog cache (eg: persistent nodemap)
CACHE_MANIFESTLOG_CACHE = b"manifestlog-cache"
# Warm rev branch cache
CACHE_REV_BRANCH = b"rev-branch-cache"
# Warm tags' cache for default repoview
CACHE_TAGS_DEFAULT = b"tags-default"
# Warm tags' cache for repoview's filter-level used by server
CACHE_TAGS_SERVED = b"tags-served"

# the caches to warm by default after a simple transaction
# (this is a mutable set to let extensions update it)
CACHES_DEFAULT = {
    CACHE_BRANCHMAP_SERVED,
}

# the caches to warm when warming all of them
# (this is a mutable set to let extensions update it)
CACHES_ALL = {
    CACHE_BRANCHMAP_SERVED,
    CACHE_BRANCHMAP_ALL,
    CACHE_CHANGELOG_CACHE,
    CACHE_FILE_NODE_TAGS,
    CACHE_FULL_MANIFEST,
    CACHE_MANIFESTLOG_CACHE,
    CACHE_TAGS_DEFAULT,
    CACHE_TAGS_SERVED,
}

# the caches to warm by default after a clone
# (this is a mutable set to let extensions update it)
CACHES_POST_CLONE = CACHES_ALL.copy()
CACHES_POST_CLONE.discard(CACHE_FILE_NODE_TAGS)
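
# Since the ``CACHES_*`` groups above are mutable sets, an extension may add
# its own cache to a group. A minimal, hypothetical sketch:
#
#     from mercurial.interfaces import repository
#
#     # also warm the served tags cache after simple transactions
#     repository.CACHES_DEFAULT.add(repository.CACHE_TAGS_SERVED)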


class ipeerconnection(interfaceutil.Interface):
    """Represents a "connection" to a repository.

    This is the base interface for representing a connection to a repository.
    It holds basic properties and methods applicable to all peer types.

    This is not a complete interface definition and should not be used
    outside of this module.
    """

    ui = interfaceutil.Attribute("""ui.ui instance""")
    path = interfaceutil.Attribute("""a urlutil.path instance or None""")

    def url():
        """Returns a URL string representing this peer.

        Currently, implementations expose the raw URL used to construct the
        instance. It may contain credentials as part of the URL. The
        expectations of the value aren't well-defined and this could lead to
        data leakage.

        TODO audit/clean consumers and more clearly define the contents of this
        value.
        """

    def local():
        """Returns a local repository instance.

        If the peer represents a local repository, returns an object that
        can be used to interface with it. Otherwise returns ``None``.
        """

    def canpush():
        """Returns a boolean indicating if this peer can be pushed to."""

    def close():
        """Close the connection to this peer.

        This is called when the peer will no longer be used. Resources
        associated with the peer should be cleaned up.
        """


class ipeercapabilities(interfaceutil.Interface):
    """Peer sub-interface related to capabilities."""

    def capable(name):
        """Determine support for a named capability.

        Returns ``False`` if capability not supported.

        Returns ``True`` if boolean capability is supported. Returns a string
        if capability support is non-boolean.

        Capability strings may or may not map to wire protocol capabilities.
        """

    def requirecap(name, purpose):
        """Require a capability to be present.

        Raises a ``CapabilityError`` if the capability isn't present.
        """


class ipeercommands(interfaceutil.Interface):
    """Client-side interface for communicating over the wire protocol.

    This interface is used as a gateway to the Mercurial wire protocol.
    Methods commonly call wire protocol commands of the same name.
    """

    def branchmap():
        """Obtain heads in named branches.

        Returns a dict mapping branch name to an iterable of nodes that are
        heads on that branch.
        """

    def capabilities():
        """Obtain capabilities of the peer.

        Returns a set of string capabilities.
        """

    def clonebundles():
        """Obtains the clone bundles manifest for the repo.

        Returns the manifest as unparsed bytes.
        """

    def debugwireargs(one, two, three=None, four=None, five=None):
        """Used to facilitate debugging of arguments passed over the wire."""

    def getbundle(source, **kwargs):
        """Obtain remote repository data as a bundle.

        This command is how the bulk of repository data is transferred from
        the peer to the local repository.

        Returns a generator of bundle data.
        """

    def heads():
        """Determine all known head revisions in the peer.

        Returns an iterable of binary nodes.
        """

    def known(nodes):
        """Determine whether multiple nodes are known.

        Accepts an iterable of nodes whose presence to check for.

        Returns an iterable of booleans indicating whether the corresponding
        node at that index is known to the peer.
        """

    def listkeys(namespace):
        """Obtain all keys in a pushkey namespace.

        Returns an iterable of key names.
        """

    def lookup(key):
        """Resolve a value to a known revision.

        Returns a binary node of the resolved revision on success.
        """

    def pushkey(namespace, key, old, new):
        """Set a value using the ``pushkey`` protocol.

        Arguments correspond to the pushkey namespace and key to operate on
        and the old and new values for that key.

        Returns a string with the peer result. The value inside varies by the
        namespace.
        """

    def stream_out():
        """Obtain streaming clone data.

        Successful result should be a generator of data chunks.
        """

    def unbundle(bundle, heads, url):
        """Transfer repository data to the peer.

        This is how the bulk of data during a push is transferred.

        Returns the integer number of heads added to the peer.
        """


class ipeerlegacycommands(interfaceutil.Interface):
    """Interface for implementing support for legacy wire protocol commands.

    Wire protocol commands transition to legacy status when they are no longer
    used by modern clients. To facilitate identifying which commands are
    legacy, the interfaces are split.
    """

    def between(pairs):
        """Obtain nodes between pairs of nodes.

        ``pairs`` is an iterable of node pairs.

        Returns an iterable of iterables of nodes corresponding to each
        requested pair.
        """

    def branches(nodes):
        """Obtain ancestor changesets of specific nodes back to a branch point.

        For each requested node, the peer finds the first ancestor node that is
        a DAG root or is a merge.

        Returns an iterable of iterables with the resolved values for each node.
        """

    def changegroup(nodes, source):
        """Obtain a changegroup with data for descendants of specified nodes."""

    def changegroupsubset(bases, heads, source):
        pass


class ipeercommandexecutor(interfaceutil.Interface):
    """Represents a mechanism to execute remote commands.

    This is the primary interface for requesting that wire protocol commands
    be executed. Instances of this interface are active in a context manager
    and have a well-defined lifetime. When the context manager exits, all
    outstanding requests are waited on.
    """

    def callcommand(name, args):
        """Request that a named command be executed.

        Receives the command name and a dictionary of command arguments.

        Returns a ``concurrent.futures.Future`` that will resolve to the
        result of that command request. The exact value is left up to
        the implementation and possibly varies by command.

        Not all commands can coexist with other commands in an executor
        instance: it depends on the underlying wire protocol transport being
        used and the command itself.

        Implementations MAY call ``sendcommands()`` automatically if the
        requested command cannot coexist with other commands in this executor.

        Implementations MAY call ``sendcommands()`` automatically when the
        future's ``result()`` is called. So, consumers using multiple
        commands with an executor MUST ensure that ``result()`` is not called
        until all command requests have been issued.
        """

    def sendcommands():
        """Trigger submission of queued command requests.

        Not all transports submit commands as soon as they are requested to
        run. When called, this method forces queued command requests to be
        issued. It will no-op if all commands have already been sent.

        When called, no more new commands may be issued with this executor.
        """

    def close():
        """Signal that this command request is finished.

        When called, no more new commands may be issued. All outstanding
        commands that have previously been issued are waited on before
        returning. This not only includes waiting for the futures to resolve,
        but also waiting for all response data to arrive. In other words,
        calling this waits for all on-wire state for issued command requests
        to finish.

        When used as a context manager, this method is called when exiting the
        context manager.

        This method may call ``sendcommands()`` if there are buffered commands.
        """


class ipeerrequests(interfaceutil.Interface):
    """Interface for executing commands on a peer."""

    limitedarguments = interfaceutil.Attribute(
        """True if the peer cannot receive large argument values for commands."""
    )

    def commandexecutor():
        """A context manager that resolves to an ipeercommandexecutor.

        The object this resolves to can be used to issue command requests
        to the peer.

        Callers should call its ``callcommand`` method to issue command
        requests.

        A new executor should be obtained for each distinct set of commands
        (possibly just a single command) that the consumer wants to execute
        as part of a single operation or round trip. This is because some
        peers are half-duplex and/or don't support persistent connections.
        e.g. in the case of HTTP peers, commands sent to an executor represent
        a single HTTP request. While some peers may support multiple command
        sends over the wire per executor, consumers need to code to the least
        capable peer. So it should be assumed that command executors buffer
        called commands until they are told to send them and that each
        command executor could result in a new connection or wire-level request
        being issued.
        """


class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
    """Unified interface for peer repositories.

    All peer instances must conform to this interface.
    """


class ipeerv2(ipeerconnection, ipeercapabilities, ipeerrequests):
    """Unified peer interface for wire protocol version 2 peers."""

    apidescriptor = interfaceutil.Attribute(
        """Data structure holding description of server API."""
    )


@interfaceutil.implementer(ipeerbase)
class peer:
    """Base class for peer repositories."""

    limitedarguments = False

    def __init__(self, ui, path=None):
        self.ui = ui
        self.path = path

    def capable(self, name):
        caps = self.capabilities()
        if name in caps:
            return True

        name = b'%s=' % name
        for cap in caps:
            if cap.startswith(name):
                return cap[len(name) :]

        return False

    def requirecap(self, name, purpose):
        if self.capable(name):
            return

        raise error.CapabilityError(
            _(
                b'cannot %s; remote repository does not support the '
                b'\'%s\' capability'
            )
            % (purpose, name)
        )
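
    # A minimal sketch of the lookup implemented by ``capable()`` above:
    # boolean capabilities answer ``True``/``False``, while ``name=value``
    # capabilities answer the value string (``peer`` is a hypothetical
    # connected instance):
    #
    #     if peer.capable(b'branchmap'):
    #         ...
    #     value = peer.capable(b'bundle2')  # True, False, or a bytes value
    #     peer.requirecap(b'lookup', b'look up remote revisions')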


class iverifyproblem(interfaceutil.Interface):
    """Represents a problem with the integrity of the repository.

    Instances of this interface are emitted to describe an integrity issue
    with a repository (e.g. corrupt storage, missing data, etc).

    Instances are essentially messages associated with severity.
    """

    warning = interfaceutil.Attribute(
        """Message indicating a non-fatal problem."""
    )

    error = interfaceutil.Attribute("""Message indicating a fatal problem.""")

    node = interfaceutil.Attribute(
        """Revision encountering the problem.

        ``None`` means the problem doesn't apply to a single revision.
        """
    )


class irevisiondelta(interfaceutil.Interface):
    """Represents a delta between one revision and another.

    Instances convey enough information to allow a revision to be exchanged
    with another repository.

    Instances represent the fulltext revision data or a delta against
    another revision. Therefore the ``revision`` and ``delta`` attributes
    are mutually exclusive.

    Typically used for changegroup generation.
    """

    node = interfaceutil.Attribute("""20 byte node of this revision.""")

    p1node = interfaceutil.Attribute(
        """20 byte node of 1st parent of this revision."""
    )

    p2node = interfaceutil.Attribute(
        """20 byte node of 2nd parent of this revision."""
    )

    linknode = interfaceutil.Attribute(
        """20 byte node of the changelog revision this node is linked to."""
    )

    flags = interfaceutil.Attribute(
        """2 bytes of integer flags that apply to this revision.

        This is a bitwise composition of the ``REVISION_FLAG_*`` constants.
        """
    )

    basenode = interfaceutil.Attribute(
        """20 byte node of the revision this data is a delta against.

        ``nullid`` indicates that the revision is a full revision and not
        a delta.
        """
    )

    baserevisionsize = interfaceutil.Attribute(
        """Size of base revision this delta is against.

        May be ``None`` if ``basenode`` is ``nullid``.
        """
    )

    revision = interfaceutil.Attribute(
        """Raw fulltext of revision data for this node."""
    )

    delta = interfaceutil.Attribute(
        """Delta between ``basenode`` and ``node``.

        Stored in the bdiff delta format.
        """
    )

    sidedata = interfaceutil.Attribute(
        """Raw sidedata bytes for the given revision."""
    )

    protocol_flags = interfaceutil.Attribute(
        """Single byte of integer flags that can influence the protocol.

        This is a bitwise composition of the ``storageutil.CG_FLAG*`` constants.
        """
    )


class ifilerevisionssequence(interfaceutil.Interface):
    """Contains index data for all revisions of a file.

    Types implementing this behave like lists of tuples. The index
    in the list corresponds to the revision number. The values contain
    index metadata.

    The *null* revision (revision number -1) is always the last item
    in the index.
    """

    def __len__():
        """The total number of revisions."""

    def __getitem__(rev):
        """Returns the object having a specific revision number.

        Returns an 8-tuple with the following fields:

        offset+flags
            Contains the offset and flags for the revision. 64-bit unsigned
            integer where first 6 bytes are the offset and the next 2 bytes
            are flags. The offset can be 0 if it is not used by the store.
        compressed size
            Size of the revision data in the store. It can be 0 if it isn't
            needed by the store.
        uncompressed size
            Fulltext size. It can be 0 if it isn't needed by the store.
        base revision
            Revision number of revision the delta for storage is encoded
            against. -1 indicates not encoded against a base revision.
        link revision
            Revision number of changelog revision this entry is related to.
        p1 revision
            Revision number of 1st parent. -1 if no 1st parent.
        p2 revision
            Revision number of 2nd parent. -1 if no 2nd parent.
        node
            Binary node value for this revision number.

        Negative values should index off the end of the sequence. ``-1``
        should return the null revision. ``-2`` should return the most
        recent revision.
        """

    def __contains__(rev):
        """Whether a revision number exists."""

    def insert(self, i, entry):
        """Add an item to the index at specific revision."""


class ifileindex(interfaceutil.Interface):
    """Storage interface for index data of a single file.

    File storage data is divided into index metadata and data storage.
    This interface defines the index portion of the interface.

    The index logically consists of:

    * A mapping between revision numbers and nodes.
    * DAG data (storing and querying the relationship between nodes).
    * Metadata to facilitate storage.
    """

    nullid = interfaceutil.Attribute(
        """node for the null revision for use as delta base."""
    )

    def __len__():
        """Obtain the number of revisions stored for this file."""

    def __iter__():
        """Iterate over revision numbers for this file."""

    def hasnode(node):
        """Returns a bool indicating if a node is known to this store.

        Implementations must only return True for full, binary node values:
        hex nodes, revision numbers, and partial node matches must be
        rejected.

        The null node is never present.
        """

    def revs(start=0, stop=None):
        """Iterate over revision numbers for this file, with control."""

    def parents(node):
        """Returns a 2-tuple of parent nodes for a revision.

        Values will be ``nullid`` if the parent is empty.
        """

    def parentrevs(rev):
        """Like parents() but operates on revision numbers."""

    def rev(node):
        """Obtain the revision number given a node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def node(rev):
        """Obtain the node value given a revision number.

        Raises ``IndexError`` if the node is not known.
        """

    def lookup(node):
        """Attempt to resolve a value to a node.

        Value can be a binary node, hex node, revision number, or a string
        that can be converted to an integer.

        Raises ``error.LookupError`` if a node could not be resolved.
        """

    def linkrev(rev):
        """Obtain the changeset revision number a revision is linked to."""

    def iscensored(rev):
        """Return whether a revision's content has been censored."""

    def commonancestorsheads(node1, node2):
        """Obtain an iterable of nodes containing heads of common ancestors.

        See ``ancestor.commonancestorsheads()``.
        """

    def descendants(revs):
        """Obtain descendant revision numbers for a set of revision numbers.

        If ``nullrev`` is in the set, this is equivalent to ``revs()``.
        """

    def heads(start=None, stop=None):
        """Obtain a list of nodes that are DAG heads, with control.

        The set of revisions examined can be limited by specifying
        ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
        iterable of nodes. DAG traversal starts at earlier revision
        ``start`` and iterates forward until any node in ``stop`` is
        encountered.
        """

    def children(node):
        """Obtain nodes that are children of a node.

        Returns a list of nodes.
        """


class ifiledata(interfaceutil.Interface):
    """Storage interface for data storage of a specific file.

    This complements ``ifileindex`` and provides an interface for accessing
    data for a tracked file.
    """

    def size(rev):
        """Obtain the fulltext size of file data.

        Any metadata is excluded from size measurements.
        """

    def revision(node, raw=False):
        """Obtain fulltext data for a node.

        By default, any storage transformations are applied before the data
        is returned. If ``raw`` is True, non-raw storage transformations
        are not applied.

        The fulltext data may contain a header containing metadata. Most
        consumers should use ``read()`` to obtain the actual file data.
        """

    def rawdata(node):
        """Obtain raw data for a node."""

    def read(node):
        """Resolve file fulltext data.

        This is similar to ``revision()`` except any metadata in the data
        headers is stripped.
        """

    def renamed(node):
        """Obtain copy metadata for a node.

        Returns ``False`` if no copy metadata is stored or a 2-tuple of
        (path, node) from which this revision was copied.
        """

    def cmp(node, fulltext):
        """Compare fulltext to another revision.

        Returns True if the fulltext is different from what is stored.

        This takes copy metadata into account.

        TODO better document the copy metadata and censoring logic.
        """

    def emitrevisions(
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
        deltamode=CG_DELTAMODE_STD,
    ):
        """Produce ``irevisiondelta`` for revisions.

        Given an iterable of nodes, emits objects conforming to the
        ``irevisiondelta`` interface that describe revisions in storage.

        This method is a generator.

        The input nodes may be unordered. Implementations must ensure that a
        node's parents are emitted before the node itself. Transitively, this
        means that a node may only be emitted once all its ancestors in
        ``nodes`` have also been emitted.

        By default, emits "index" data (the ``node``, ``p1node``, and
        ``p2node`` attributes). If ``revisiondata`` is set, revision data
        will also be present on the emitted objects.

        With default argument values, implementations can choose to emit
        either fulltext revision data or a delta. When emitting deltas,
        implementations must consider whether the delta's base revision
        fulltext is available to the receiver.

        The base revision fulltext is guaranteed to be available if any of
        the following are met:

        * Its fulltext revision was emitted by this method call.
        * A delta for that revision was emitted by this method call.
        * ``assumehaveparentrevisions`` is True and the base revision is a
          parent of the node.

        ``nodesorder`` can be used to control the order that revisions are
        emitted. By default, revisions can be reordered as long as they are
        in DAG topological order (see above). If the value is ``nodes``,
        the iteration order from ``nodes`` should be used. If the value is
        ``storage``, then the native order from the backing storage layer
        is used. (Not all storage layers will have strong ordering and behavior
        of this mode is storage-dependent.) ``nodes`` ordering can force
        revisions to be emitted before their ancestors, so consumers should
        use it with care.

        The ``linknode`` attribute on the returned ``irevisiondelta`` may not
        be set and it is the caller's responsibility to resolve it, if needed.

        If ``deltamode`` is CG_DELTAMODE_PREV and revision data is requested,
        all revision data should be emitted as deltas against the revision
        emitted just prior. The initial revision should be a delta against its
        1st parent.
        """


class ifilemutation(interfaceutil.Interface):
    """Storage interface for mutation events of a tracked file."""

    def add(filedata, meta, transaction, linkrev, p1, p2):
        """Add a new revision to the store.

        Takes file data, dictionary of metadata, a transaction, linkrev,
        and parent nodes.

        Returns the node that was added.

        May no-op if a revision matching the supplied data is already stored.
        """

    def addrevision(
        revisiondata,
        transaction,
        linkrev,
        p1,
        p2,
        node=None,
        flags=0,
        cachedelta=None,
    ):
        """Add a new revision to the store and return its number.

        This is similar to ``add()`` except it operates at a lower level.

        The data passed in already contains a metadata header, if any.

        ``node`` and ``flags`` can be used to define the expected node and
        the flags to use with storage. ``flags`` is a bitwise value composed
        of the various ``REVISION_FLAG_*`` constants.

        ``add()`` is usually called when adding files from e.g. the working
        directory. ``addrevision()`` is often called by ``add()`` and for
        scenarios where revision data has already been computed, such as when
        applying raw data from a peer repo.
        """

    def addgroup(
        deltas,
        linkmapper,
        transaction,
        addrevisioncb=None,
        duplicaterevisioncb=None,
        maybemissingparents=False,
    ):
        """Process a series of deltas for storage.

        ``deltas`` is an iterable of 7-tuples of
        (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
        to add.

        The ``delta`` field contains ``mpatch`` data to apply to a base
        revision, identified by ``deltabase``. The base node can be
        ``nullid``, in which case the header from the delta can be ignored
        and the delta used as the fulltext.

        ``alwayscache`` instructs the lower layers to cache the content of the
        newly added revision, even if it needs to be explicitly computed.
        This used to be the default when ``addrevisioncb`` was provided up to
        Mercurial 5.8.

        ``addrevisioncb`` should be called for each new rev as it is committed.
        ``duplicaterevisioncb`` should be called for all revs with a
        pre-existing node.

        ``maybemissingparents`` is a bool indicating whether the incoming
        data may reference parents/ancestor revisions that aren't present.
        This flag is set when receiving data into a "shallow" store that
        doesn't hold all history.

        Returns a list of nodes that were processed. A node will be in the list
        even if it existed in the store previously.
        """
851
851
852 def censorrevision(tr, node, tombstone=b''):
852 def censorrevision(tr, node, tombstone=b''):
853 """Remove the content of a single revision.
853 """Remove the content of a single revision.
854
854
855 The specified ``node`` will have its content purged from storage.
855 The specified ``node`` will have its content purged from storage.
856 Future attempts to access the revision data for this node will
856 Future attempts to access the revision data for this node will
857 result in failure.
857 result in failure.
858
858
859 A ``tombstone`` message can optionally be stored. This message may be
859 A ``tombstone`` message can optionally be stored. This message may be
860 displayed to users when they attempt to access the missing revision
860 displayed to users when they attempt to access the missing revision
861 data.
861 data.
862
862
863 Storage backends may have stored deltas against the previous content
863 Storage backends may have stored deltas against the previous content
864 in this revision. As part of censoring a revision, these storage
864 in this revision. As part of censoring a revision, these storage
865 backends are expected to rewrite any internally stored deltas such
865 backends are expected to rewrite any internally stored deltas such
866 that they no longer reference the deleted content.
866 that they no longer reference the deleted content.
867 """
867 """

    def getstrippoint(minlink):
        """Find the minimum revision that must be stripped to strip a linkrev.

        Returns a 2-tuple containing the minimum revision number and a set
        of all revision numbers that would be broken by this strip.

        TODO this is highly revlog centric and should be abstracted into
        a higher-level deletion API. ``repair.strip()`` relies on this.
        """

    def strip(minlink, transaction):
        """Remove storage of items starting at a linkrev.

        This uses ``getstrippoint()`` to determine the first node to remove.
        Then it effectively truncates storage for all revisions after that.

        TODO this is highly revlog centric and should be abstracted into a
        higher-level deletion API.
        """


class ifilestorage(ifileindex, ifiledata, ifilemutation):
    """Complete storage interface for a single tracked file."""

    def files():
        """Obtain paths that are backing storage for this file.

        TODO this is used heavily by verify code and there should probably
        be a better API for that.
        """

    def storageinfo(
        exclusivefiles=False,
        sharedfiles=False,
        revisionscount=False,
        trackedsize=False,
        storedsize=False,
    ):
        """Obtain information about storage for this file's data.

        Returns a dict describing storage for this tracked path. The keys
        in the dict map to arguments of the same name. The arguments are
        bools indicating whether to calculate and obtain that data.

        exclusivefiles
           Iterable of (vfs, path) describing files that are exclusively
           used to back storage for this tracked path.

        sharedfiles
           Iterable of (vfs, path) describing files that are used to back
           storage for this tracked path. Those files may also provide storage
           for other stored entities.

        revisionscount
           Number of revisions available for retrieval.

        trackedsize
           Total size in bytes of all tracked revisions. This is a sum of the
           length of the fulltext of all revisions.

        storedsize
           Total size in bytes used to store data for all tracked revisions.
           This is commonly less than ``trackedsize`` due to internal usage
           of deltas rather than fulltext revisions.

        Not all storage backends may support all queries or have a reasonable
        value to use. In that case, the value should be set to ``None`` and
        callers are expected to handle this special value.
        """

    def verifyintegrity(state):
        """Verifies the integrity of file storage.

        ``state`` is a dict holding state of the verifier process. It can be
        used to communicate data between invocations of multiple storage
        primitives.

        If individual revisions cannot have their revision content resolved,
        the method is expected to set the ``skipread`` key to a set of nodes
        that encountered problems. If set, the method can also add the node(s)
        to ``safe_renamed`` in order to indicate nodes that may perform the
        rename checks with currently accessible data.

        The method yields objects conforming to the ``iverifyproblem``
        interface.
        """


class idirs(interfaceutil.Interface):
    """Interface representing a collection of directories from paths.

    This interface is essentially a derived data structure representing
    directories from a collection of paths.
    """

    def addpath(path):
        """Add a path to the collection.

        All directories in the path will be added to the collection.
        """

    def delpath(path):
        """Remove a path from the collection.

        If the removal was the last path in a particular directory, the
        directory is removed from the collection.
        """

    def __iter__():
        """Iterate over the directories in this collection of paths."""

    def __contains__(path):
        """Whether a specific directory is in this collection."""


class imanifestdict(interfaceutil.Interface):
    """Interface representing a manifest data structure.

    A manifest is effectively a dict mapping paths to entries. Each entry
    consists of a binary node and extra flags affecting that entry.
    """

    def __getitem__(path):
        """Returns the binary node value for a path in the manifest.

        Raises ``KeyError`` if the path does not exist in the manifest.

        Equivalent to ``self.find(path)[0]``.
        """

    def find(path):
        """Returns the entry for a path in the manifest.

        Returns a 2-tuple of (node, flags).

        Raises ``KeyError`` if the path does not exist in the manifest.
        """

    def __len__():
        """Return the number of entries in the manifest."""

    def __nonzero__():
        """Returns True if the manifest has entries, False otherwise."""

    __bool__ = __nonzero__

    def __setitem__(path, node):
        """Define the node value for a path in the manifest.

        If the path is already in the manifest, its flags will be copied to
        the new entry.
        """

    def __contains__(path):
        """Whether a path exists in the manifest."""

    def __delitem__(path):
        """Remove a path from the manifest.

        Raises ``KeyError`` if the path is not in the manifest.
        """

    def __iter__():
        """Iterate over paths in the manifest."""

    def iterkeys():
        """Iterate over paths in the manifest."""

    def keys():
        """Obtain a list of paths in the manifest."""

    def filesnotin(other, match=None):
        """Obtain the set of paths in this manifest but not in another.

        ``match`` is an optional matcher function to be applied to both
        manifests.

        Returns a set of paths.
        """

    def dirs():
        """Returns an object implementing the ``idirs`` interface."""

    def hasdir(dir):
        """Returns a bool indicating if a directory is in this manifest."""

    def walk(match):
        """Generator of paths in manifest satisfying a matcher.

        If the matcher has explicit files listed and they don't exist in
        the manifest, ``match.bad()`` is called for each missing file.
        """

    def diff(other, match=None, clean=False):
        """Find differences between this manifest and another.

        This manifest is compared to ``other``.

        If ``match`` is provided, the two manifests are filtered against this
        matcher and only entries satisfying the matcher are compared.

        If ``clean`` is True, unchanged files are included in the returned
        object.

        Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
        the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
        represents the node and flags for this manifest and ``(node2, flag2)``
        are the same for the other manifest.
        """

    def setflag(path, flag):
        """Set the flag value for a given path.

        Raises ``KeyError`` if the path is not already in the manifest.
        """

    def get(path, default=None):
        """Obtain the node value for a path or a default value if missing."""

    def flags(path):
        """Return the flags value for a path (default: empty bytestring)."""

    def copy():
        """Return a copy of this manifest."""

    def items():
        """Returns an iterable of (path, node) for items in this manifest."""

    def iteritems():
        """Identical to items()."""

    def iterentries():
        """Returns an iterable of (path, node, flags) for this manifest.

        Similar to ``iteritems()`` except items are a 3-tuple and include
        flags.
        """

    def text():
        """Obtain the raw data representation for this manifest.

        Result is used to create a manifest revision.
        """

    def fastdelta(base, changes):
        """Obtain a delta between this manifest and another given changes.

        ``base`` is the raw data representation for another manifest.

        ``changes`` is an iterable of ``(path, to_delete)``.

        Returns a 2-tuple containing ``bytearray(self.text())`` and the
        delta between ``base`` and this manifest.

        If this manifest implementation can't support ``fastdelta()``,
        raise ``mercurial.manifest.FastdeltaUnavailable``.
        """


class imanifestrevisionbase(interfaceutil.Interface):
    """Base interface representing a single revision of a manifest.

    Should not be used as a primary interface: should always be inherited
    as part of a larger interface.
    """

    def copy():
        """Obtain a copy of this manifest instance.

        Returns an object conforming to the ``imanifestrevisionwritable``
        interface. The instance will be associated with the same
        ``imanifestlog`` collection as this instance.
        """

    def read():
        """Obtain the parsed manifest data structure.

        The returned object conforms to the ``imanifestdict`` interface.
        """


class imanifestrevisionstored(imanifestrevisionbase):
    """Interface representing a manifest revision committed to storage."""

    def node():
        """The binary node for this manifest."""

    parents = interfaceutil.Attribute(
        """List of binary nodes that are parents for this manifest revision."""
    )

    def readdelta(shallow=False):
        """Obtain the manifest data structure representing changes from parent.

        This manifest is compared to its 1st parent. A new manifest
        representing those differences is constructed.

        The returned object conforms to the ``imanifestdict`` interface.
        """

    def readfast(shallow=False):
        """Calls either ``read()`` or ``readdelta()``.

        The faster of the two options is called.
        """

    def find(key):
        """Calls ``self.read().find(key)``.

        Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
        """


class imanifestrevisionwritable(imanifestrevisionbase):
    """Interface representing a manifest revision that can be committed."""

    def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
        """Add this revision to storage.

        Takes a transaction object, the changeset revision number it will
        be associated with, its parent nodes, and lists of added and
        removed paths.

        If match is provided, storage can choose not to inspect or write out
        items that do not match. Storage is still required to be able to provide
        the full manifest in the future for any directories written (these
        manifests should not be "narrowed on disk").

        Returns the binary node of the created revision.
        """


class imanifeststorage(interfaceutil.Interface):
    """Storage interface for manifest data."""

    nodeconstants = interfaceutil.Attribute(
        """nodeconstants used by the current repository."""
    )

    tree = interfaceutil.Attribute(
        """The path to the directory this manifest tracks.

        The empty bytestring represents the root manifest.
        """
    )

    index = interfaceutil.Attribute(
        """An ``ifilerevisionssequence`` instance."""
    )

    opener = interfaceutil.Attribute(
        """VFS opener to use to access underlying files used for storage.

        TODO this is revlog specific and should not be exposed.
        """
    )

    _generaldelta = interfaceutil.Attribute(
        """Whether generaldelta storage is being used.

        TODO this is revlog specific and should not be exposed.
        """
    )

    fulltextcache = interfaceutil.Attribute(
        """Dict with cache of fulltexts.

        TODO this doesn't feel appropriate for the storage interface.
        """
    )

    def __len__():
        """Obtain the number of revisions stored for this manifest."""

    def __iter__():
        """Iterate over revision numbers for this manifest."""

    def rev(node):
        """Obtain the revision number given a binary node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def node(rev):
        """Obtain the node value given a revision number.

        Raises ``error.LookupError`` if the revision is not known.
        """

    def lookup(value):
        """Attempt to resolve a value to a node.

        Value can be a binary node, hex node, revision number, or a bytes
        that can be converted to an integer.

        Raises ``error.LookupError`` if a node could not be resolved.
        """

    def parents(node):
        """Returns a 2-tuple of parent nodes for a node.

        Values will be ``nullid`` if the parent is empty.
        """

    def parentrevs(rev):
        """Like parents() but operates on revision numbers."""

    def linkrev(rev):
        """Obtain the changeset revision number a revision is linked to."""

    def revision(node, _df=None):
        """Obtain fulltext data for a node."""

    def rawdata(node, _df=None):
        """Obtain raw data for a node."""

    def revdiff(rev1, rev2):
        """Obtain a delta between two revision numbers.

        The returned data is the result of ``bdiff.bdiff()`` on the raw
        revision data.
        """

    def cmp(node, fulltext):
        """Compare fulltext to another revision.

        Returns True if the fulltext is different from what is stored.
        """

    def emitrevisions(
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
    ):
        """Produce ``irevisiondelta`` describing revisions.

        See the documentation for ``ifiledata`` for more.
        """

    def addgroup(
        deltas,
        linkmapper,
        transaction,
        addrevisioncb=None,
        duplicaterevisioncb=None,
    ):
        """Process a series of deltas for storage.

        See the documentation in ``ifilemutation`` for more.
        """

    def rawsize(rev):
        """Obtain the size of tracked data.

        Is equivalent to ``len(m.rawdata(node))``.

        TODO this method is only used by upgrade code and may be removed.
        """

    def getstrippoint(minlink):
        """Find minimum revision that must be stripped to strip a linkrev.

        See the documentation in ``ifilemutation`` for more.
        """

    def strip(minlink, transaction):
        """Remove storage of items starting at a linkrev.

        See the documentation in ``ifilemutation`` for more.
        """

    def checksize():
        """Obtain the expected sizes of backing files.

        TODO this is used by verify and it should not be part of the interface.
        """

    def files():
        """Obtain paths that are backing storage for this manifest.

        TODO this is used by verify and there should probably be a better API
        for this functionality.
        """

    def deltaparent(rev):
        """Obtain the revision that a revision is delta'd against.

        TODO delta encoding is an implementation detail of storage and should
        not be exposed to the storage interface.
        """

    def clone(tr, dest, **kwargs):
        """Clone this instance to another."""

    def clearcaches(clear_persisted_data=False):
        """Clear any caches associated with this instance."""

    def dirlog(d):
        """Obtain a manifest storage instance for a tree."""

    def add(
        m, transaction, link, p1, p2, added, removed, readtree=None, match=None
    ):
        """Add a revision to storage.

        ``m`` is an object conforming to ``imanifestdict``.

        ``link`` is the linkrev revision number.

        ``p1`` and ``p2`` are the parent revision numbers.

        ``added`` and ``removed`` are iterables of added and removed paths,
        respectively.

        ``readtree`` is a function that can be used to read the child tree(s)
        when recursively writing the full tree structure when using
        treemanifests.

        ``match`` is a matcher that can be used to hint to storage that not all
        paths must be inspected; this is an optimization and can be safely
        ignored. Note that the storage must still be able to reproduce a full
        manifest including files that did not match.
        """

    def storageinfo(
        exclusivefiles=False,
        sharedfiles=False,
        revisionscount=False,
        trackedsize=False,
        storedsize=False,
    ):
        """Obtain information about storage for this manifest's data.

        See ``ifilestorage.storageinfo()`` for a description of this method.
        This one behaves the same way, except for manifest data.
        """


class imanifestlog(interfaceutil.Interface):
    """Interface representing a collection of manifest snapshots.

    Represents the root manifest in a repository.

    Also serves as a means to access nested tree manifests and to cache
    tree manifests.
    """

    nodeconstants = interfaceutil.Attribute(
        """nodeconstants used by the current repository."""
    )

    def __getitem__(node):
        """Obtain a manifest instance for a given binary node.

        Equivalent to calling ``self.get('', node)``.

        The returned object conforms to the ``imanifestrevisionstored``
        interface.
        """

    def get(tree, node, verify=True):
        """Retrieve the manifest instance for a given directory and binary node.

        ``node`` always refers to the node of the root manifest (which will be
        the only manifest if flat manifests are being used).

        If ``tree`` is the empty string, the root manifest is returned.
        Otherwise the manifest for the specified directory will be returned
        (requires tree manifests).

        If ``verify`` is True, ``LookupError`` is raised if the node is not
        known.

        The returned object conforms to the ``imanifestrevisionstored``
        interface.
        """

    def getstorage(tree):
        """Retrieve an interface to storage for a particular tree.

        If ``tree`` is the empty bytestring, storage for the root manifest will
        be returned. Otherwise storage for a tree manifest is returned.

        TODO formalize interface for returned object.
        """

    def clearcaches():
        """Clear caches associated with this collection."""

    def rev(node):
        """Obtain the revision number for a binary node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def update_caches(transaction):
        """Update whatever caches are relevant for the used storage."""


class ilocalrepositoryfilestorage(interfaceutil.Interface):
    """Local repository sub-interface providing access to tracked file storage.

    This interface defines how a repository accesses storage for a single
    tracked file path.
    """

    def file(f):
        """Obtain a filelog for a tracked path.

        The returned type conforms to the ``ifilestorage`` interface.
        """


class ilocalrepositorymain(interfaceutil.Interface):
    """Main interface for local repositories.

    This currently captures the reality of things - not how things should be.
    """

    nodeconstants = interfaceutil.Attribute(
        """Constant nodes matching the hash function used by the repository."""
    )
    nullid = interfaceutil.Attribute(
        """null revision for the hash function used by the repository."""
    )

    supported = interfaceutil.Attribute(
        """Set of requirements that this repo is capable of opening."""
    )

    requirements = interfaceutil.Attribute(
        """Set of requirements this repo uses."""
    )

    features = interfaceutil.Attribute(
        """Set of "features" this repository supports.

        A "feature" is a loosely-defined term. It can refer to a feature
        in the classical sense or can describe an implementation detail
        of the repository. For example, a ``readonly`` feature may denote
        the repository as read-only. Or a ``revlogfilestore`` feature may
        denote that the repository is using revlogs for file storage.

        The intent of features is to provide a machine-queryable mechanism
        for repo consumers to test for various repository characteristics.

        Features are similar to ``requirements``. The main difference is that
        requirements are stored on-disk and represent requirements to open the
        repository. Features are more run-time capabilities of the repository
        and more granular capabilities (which may be derived from requirements).
        """
    )

    filtername = interfaceutil.Attribute(
        """Name of the repoview that is active on this repo."""
    )

    vfs_map = interfaceutil.Attribute(
        """A bytes-key → vfs mapping used by transaction and others."""
    )

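    # Illustrative sketch (not part of the interface): the exact keys are an
    # implementation detail, but code holding a (vfs-key, path) pair, such as
    # the transaction machinery, can resolve it through this mapping:
    #
    #   vfs = repo.vfs_map[vfs_key]
    #   data = vfs.read(path)
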
    wvfs = interfaceutil.Attribute(
        """VFS used to access the working directory."""
    )

    vfs = interfaceutil.Attribute(
        """VFS rooted at the .hg directory.

        Used to access repository data not in the store.
        """
    )

    svfs = interfaceutil.Attribute(
        """VFS rooted at the store.

        Used to access repository data in the store. Typically .hg/store.
        But can point elsewhere if the store is shared.
        """
    )

    root = interfaceutil.Attribute(
        """Path to the root of the working directory."""
    )

    path = interfaceutil.Attribute("""Path to the .hg directory.""")

    origroot = interfaceutil.Attribute(
        """The filesystem path that was used to construct the repo."""
    )

    auditor = interfaceutil.Attribute(
        """A pathauditor for the working directory.

        This checks if a path refers to a nested repository.

        Operates on the filesystem.
        """
    )

    nofsauditor = interfaceutil.Attribute(
        """A pathauditor for the working directory.

        This is like ``auditor`` except it doesn't do filesystem checks.
        """
    )

    baseui = interfaceutil.Attribute(
        """Original ui instance passed into constructor."""
    )

    ui = interfaceutil.Attribute("""Main ui instance for this repository.""")

    sharedpath = interfaceutil.Attribute(
        """Path to the .hg directory of the repo this repo was shared from."""
    )

    store = interfaceutil.Attribute("""A store instance.""")

    spath = interfaceutil.Attribute("""Path to the store.""")

    sjoin = interfaceutil.Attribute("""Alias to self.store.join.""")

    cachevfs = interfaceutil.Attribute(
        """A VFS used to access the cache directory.

        Typically .hg/cache.
        """
    )

    wcachevfs = interfaceutil.Attribute(
        """A VFS used to access the cache directory dedicated to working copy.

        Typically .hg/wcache.
        """
    )

    filteredrevcache = interfaceutil.Attribute(
        """Holds sets of revisions to be filtered."""
    )

    names = interfaceutil.Attribute("""A ``namespaces`` instance.""")

    filecopiesmode = interfaceutil.Attribute(
        """The way file copies should be dealt with in this repo."""
    )

    def close():
        """Close the handle on this repository."""

    def peer(path=None):
        """Obtain an object conforming to the ``peer`` interface."""

    def unfiltered():
        """Obtain an unfiltered/raw view of this repo."""

    def filtered(name, visibilityexceptions=None):
        """Obtain a named view of this repository."""

    obsstore = interfaceutil.Attribute("""A store of obsolescence data.""")

    changelog = interfaceutil.Attribute("""A handle on the changelog revlog.""")

    manifestlog = interfaceutil.Attribute(
        """An instance conforming to the ``imanifestlog`` interface.

        Provides access to manifests for the repository.
        """
    )

    dirstate = interfaceutil.Attribute("""Working directory state.""")

    narrowpats = interfaceutil.Attribute(
        """Matcher patterns for this repository's narrowspec."""
    )

    def narrowmatch(match=None, includeexact=False):
        """Obtain a matcher for the narrowspec."""

    def setnarrowpats(newincludes, newexcludes):
        """Define the narrowspec for this repository."""

    def __getitem__(changeid):
        """Try to resolve a changectx."""

    def __contains__(changeid):
        """Whether a changeset exists."""

    def __nonzero__():
        """Always returns True."""
        return True

    __bool__ = __nonzero__

    def __len__():
        """Returns the number of changesets in the repo."""

    def __iter__():
        """Iterate over revisions in the changelog."""

    def revs(expr, *args):
        """Evaluate a revset.

        Emits revisions.
        """

    def set(expr, *args):
        """Evaluate a revset.

        Emits changectx instances.
        """

    def anyrevs(specs, user=False, localalias=None):
        """Find revisions matching one of the given revsets."""

    def url():
        """Returns a string representing the location of this repo."""

    def hook(name, throw=False, **args):
        """Call a hook."""

    def tags():
        """Return a mapping of tag to node."""

    def tagtype(tagname):
        """Return the type of a given tag."""

    def tagslist():
        """Return a list of tags ordered by revision."""

    def nodetags(node):
        """Return the tags associated with a node."""

    def nodebookmarks(node):
        """Return the list of bookmarks pointing to the specified node."""

    def branchmap():
        """Return a mapping of branch to heads in that branch."""

    def revbranchcache():
        pass

    def register_changeset(rev, changelogrevision):
        """Extension point for caches for new nodes.

        Multiple consumers are expected to need parts of the changelogrevision,
        so it is provided as an optimization to avoid duplicate lookups. A
        simple cache would be fragile when other revisions are accessed, too."""
        pass

    def branchtip(branchtip, ignoremissing=False):
        """Return the tip node for a given branch."""

    def lookup(key):
        """Resolve the node for a revision."""

    def lookupbranch(key):
        """Look up the branch name of the given revision or branch name."""

    def known(nodes):
        """Determine whether a series of nodes is known.

        Returns a list of bools.
        """

    def local():
        """Whether the repository is local."""
        return True

    def publishing():
        """Whether the repository is a publishing repository."""

    def cancopy():
        pass

    def shared():
        """The type of shared repository or None."""

    def wjoin(f, *insidef):
        """Calls self.vfs.reljoin(self.root, f, *insidef)"""

    def setparents(p1, p2):
        """Set the parent nodes of the working directory."""

    def filectx(path, changeid=None, fileid=None):
        """Obtain a filectx for the given file revision."""

    def getcwd():
        """Obtain the current working directory from the dirstate."""

    def pathto(f, cwd=None):
        """Obtain the relative path to a file."""

    def adddatafilter(name, fltr):
        pass

    def wread(filename):
        """Read a file from wvfs, using data filters."""

    def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
        """Write data to a file in the wvfs, using data filters."""

    def wwritedata(filename, data):
        """Resolve data for writing to the wvfs, using data filters."""

    def currenttransaction():
        """Obtain the current transaction instance or None."""

    def transaction(desc, report=None):
        """Open a new transaction to write to the repository."""

    def undofiles():
        """Returns a list of (vfs, path) for files to undo transactions."""

    def recover():
        """Roll back an interrupted transaction."""

    def rollback(dryrun=False, force=False):
        """Undo the last transaction.

        DANGEROUS.
        """

    def updatecaches(tr=None, full=False, caches=None):
        """Warm repo caches."""

    def invalidatecaches():
        """Invalidate cached data due to the repository mutating."""

    def invalidatevolatilesets():
        pass

    def invalidatedirstate():
        """Invalidate the dirstate."""

    def invalidate(clearfilecache=False):
        pass

    def invalidateall():
        pass

    def lock(wait=True):
        """Lock the repository store and return a lock instance."""

    def wlock(wait=True):
        """Lock the non-store parts of the repository."""

    def currentwlock():
        """Return the wlock if it's held or None."""

    def checkcommitpatterns(wctx, match, status, fail):
        pass

    def commit(
        text=b'',
        user=None,
        date=None,
        match=None,
        force=False,
        editor=False,
        extra=None,
    ):
        """Add a new revision to the repository."""

    def commitctx(ctx, error=False, origctx=None):
        """Commit a commitctx instance to the repository."""

    def destroying():
        """Inform the repository that nodes are about to be destroyed."""

    def destroyed():
        """Inform the repository that nodes have been destroyed."""

    def status(
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        """Convenience method to call repo[x].status()."""

    def addpostdsstatus(ps):
        pass

    def postdsstatus():
        pass

    def clearpostdsstatus():
        pass

    def heads(start=None):
        """Obtain list of nodes that are DAG heads."""

    def branchheads(branch=None, start=None, closed=False):
        pass

    def branches(nodes):
        pass

    def between(pairs):
        pass

    def checkpush(pushop):
        pass

    prepushoutgoinghooks = interfaceutil.Attribute("""util.hooks instance.""")

    def pushkey(namespace, key, old, new):
        pass

    def listkeys(namespace):
        pass

    def debugwireargs(one, two, three=None, four=None, five=None):
        pass

    def savecommitmessage(text):
        pass

    def register_sidedata_computer(
        kind, category, keys, computer, flags, replace=False
    ):
        pass

    def register_wanted_sidedata(category):
1896 def register_wanted_sidedata(category):
1893 pass
1897 pass
1894
1898
1895
1899
1896 class completelocalrepository(
1900 class completelocalrepository(
1897 ilocalrepositorymain, ilocalrepositoryfilestorage
1901 ilocalrepositorymain, ilocalrepositoryfilestorage
1898 ):
1902 ):
1899 """Complete interface for a local repository."""
1903 """Complete interface for a local repository."""
1900
1904
1901
1905
1902 class iwireprotocolcommandcacher(interfaceutil.Interface):
1906 class iwireprotocolcommandcacher(interfaceutil.Interface):
1903 """Represents a caching backend for wire protocol commands.
1907 """Represents a caching backend for wire protocol commands.
1904
1908
1905 Wire protocol version 2 supports transparent caching of many commands.
1909 Wire protocol version 2 supports transparent caching of many commands.
1906 To leverage this caching, servers can activate objects that cache
1910 To leverage this caching, servers can activate objects that cache
1907 command responses. Objects handle both cache writing and reading.
1911 command responses. Objects handle both cache writing and reading.
1908 This interface defines how that response caching mechanism works.
1912 This interface defines how that response caching mechanism works.
1909
1913
1910 Wire protocol version 2 commands emit a series of objects that are
1914 Wire protocol version 2 commands emit a series of objects that are
1911 serialized and sent to the client. The caching layer exists between
1915 serialized and sent to the client. The caching layer exists between
1912 the invocation of the command function and the sending of its output
1916 the invocation of the command function and the sending of its output
1913 objects to an output layer.
1917 objects to an output layer.
1914
1918
1915 Instances of this interface represent a binding to a cache that
1919 Instances of this interface represent a binding to a cache that
1916 can serve a response (in place of calling a command function) and/or
1920 can serve a response (in place of calling a command function) and/or
1917 write responses to a cache for subsequent use.
1921 write responses to a cache for subsequent use.
1918
1922
1919 When a command request arrives, the following happens with regards
1923 When a command request arrives, the following happens with regards
1920 to this interface:
1924 to this interface:
1921
1925
1922 1. The server determines whether the command request is cacheable.
1926 1. The server determines whether the command request is cacheable.
1923 2. If it is, an instance of this interface is spawned.
1927 2. If it is, an instance of this interface is spawned.
1924 3. The cacher is activated in a context manager (``__enter__`` is called).
1928 3. The cacher is activated in a context manager (``__enter__`` is called).
1925 4. A cache *key* for that request is derived. This will call the
1929 4. A cache *key* for that request is derived. This will call the
1926 instance's ``adjustcachekeystate()`` method so the derivation
1930 instance's ``adjustcachekeystate()`` method so the derivation
1927 can be influenced.
1931 can be influenced.
1928 5. The cacher is informed of the derived cache key via a call to
1932 5. The cacher is informed of the derived cache key via a call to
1929 ``setcachekey()``.
1933 ``setcachekey()``.
1930 6. The cacher's ``lookup()`` method is called to test for presence of
1934 6. The cacher's ``lookup()`` method is called to test for presence of
1931 the derived key in the cache.
1935 the derived key in the cache.
1932 7. If ``lookup()`` returns a hit, that cached result is used in place
1936 7. If ``lookup()`` returns a hit, that cached result is used in place
1933 of invoking the command function. ``__exit__`` is called and the instance
1937 of invoking the command function. ``__exit__`` is called and the instance
1934 is discarded.
1938 is discarded.
1935 8. The command function is invoked.
1939 8. The command function is invoked.
1936 9. ``onobject()`` is called for each object emitted by the command
1940 9. ``onobject()`` is called for each object emitted by the command
1937 function.
1941 function.
1938 10. After the final object is seen, ``onfinished()`` is called.
1942 10. After the final object is seen, ``onfinished()`` is called.
1939 11. ``__exit__`` is called to signal the end of use of the instance.
1943 11. ``__exit__`` is called to signal the end of use of the instance.
1940
1944
1941 Cache *key* derivation can be influenced by the instance.
1945 Cache *key* derivation can be influenced by the instance.
1942
1946
1943 Cache keys are initially derived by a deterministic representation of
1947 Cache keys are initially derived by a deterministic representation of
1944 the command request. This includes the command name, arguments, protocol
1948 the command request. This includes the command name, arguments, protocol
1945 version, etc. This initial key derivation is performed by CBOR-encoding a
1949 version, etc. This initial key derivation is performed by CBOR-encoding a
1946 data structure and feeding that output into a hasher.
1950 data structure and feeding that output into a hasher.
1947
1951
1948 Instances of this interface can influence this initial key derivation
1952 Instances of this interface can influence this initial key derivation
1949 via ``adjustcachekeystate()``.
1953 via ``adjustcachekeystate()``.
1950
1954
1951 The instance is informed of the derived cache key via a call to
1955 The instance is informed of the derived cache key via a call to
1952 ``setcachekey()``. The instance must store the key locally so it can
1956 ``setcachekey()``. The instance must store the key locally so it can
1953 be consulted on subsequent operations that may require it.
1957 be consulted on subsequent operations that may require it.
1954
1958
1955 When constructed, the instance has access to a callable that can be used
1959 When constructed, the instance has access to a callable that can be used
1956 for encoding response objects. This callable receives as its single
1960 for encoding response objects. This callable receives as its single
1957 argument an object emitted by a command function. It returns an iterable
1961 argument an object emitted by a command function. It returns an iterable
1958 of bytes chunks representing the encoded object. Unless the cacher is
1962 of bytes chunks representing the encoded object. Unless the cacher is
1959 caching native Python objects in memory or has a way of reconstructing
1963 caching native Python objects in memory or has a way of reconstructing
1960 the original Python objects, implementations typically call this function
1964 the original Python objects, implementations typically call this function
1961 to produce bytes from the output objects and then store those bytes in
1965 to produce bytes from the output objects and then store those bytes in
1962 the cache. When it comes time to re-emit those bytes, they are wrapped
1966 the cache. When it comes time to re-emit those bytes, they are wrapped
1963 in a ``wireprototypes.encodedresponse`` instance to tell the output
1967 in a ``wireprototypes.encodedresponse`` instance to tell the output
1964 layer that they are pre-encoded.
1968 layer that they are pre-encoded.
1965
1969
1966 When receiving the objects emitted by the command function, instances
1970 When receiving the objects emitted by the command function, instances
1967 can choose what to do with those objects. The simplest thing to do is
1971 can choose what to do with those objects. The simplest thing to do is
1968 re-emit the original objects. They will be forwarded to the output
1972 re-emit the original objects. They will be forwarded to the output
1969 layer and will be processed as if the cacher did not exist.
1973 layer and will be processed as if the cacher did not exist.
1970
1974
1971 Implementations could also choose to not emit objects - instead locally
1975 Implementations could also choose to not emit objects - instead locally
1972 buffering objects or their encoded representation. They could then emit
1976 buffering objects or their encoded representation. They could then emit
1973 a single "coalesced" object when ``onfinished()`` is called. In
1977 a single "coalesced" object when ``onfinished()`` is called. In
1974 this way, the implementation would function as a filtering layer of
1978 this way, the implementation would function as a filtering layer of
1975 sorts.
1979 sorts.
1976
1980
1977 When caching objects, typically the encoded form of the object will
1981 When caching objects, typically the encoded form of the object will
1978 be stored. Keep in mind that if the original object is forwarded to
1982 be stored. Keep in mind that if the original object is forwarded to
1979 the output layer, it will need to be encoded there as well. For large
1983 the output layer, it will need to be encoded there as well. For large
1980 output, this redundant encoding could add overhead. Implementations
1984 output, this redundant encoding could add overhead. Implementations
1981 could wrap the encoded object data in ``wireprototypes.encodedresponse``
1985 could wrap the encoded object data in ``wireprototypes.encodedresponse``
1982 instances to avoid this overhead.
1986 instances to avoid this overhead.
1983 """
1987 """
1984
1988
1985 def __enter__():
1989 def __enter__():
1986 """Marks the instance as active.
1990 """Marks the instance as active.
1987
1991
1988 Should return self.
1992 Should return self.
1989 """
1993 """
1990
1994
1991 def __exit__(exctype, excvalue, exctb):
1995 def __exit__(exctype, excvalue, exctb):
1992 """Called when cacher is no longer used.
1996 """Called when cacher is no longer used.
1993
1997
1994 This can be used by implementations to perform cleanup actions (e.g.
1998 This can be used by implementations to perform cleanup actions (e.g.
1995 disconnecting network sockets, aborting a partially cached response.
1999 disconnecting network sockets, aborting a partially cached response.
1996 """
2000 """
1997
2001
1998 def adjustcachekeystate(state):
2002 def adjustcachekeystate(state):
1999 """Influences cache key derivation by adjusting state to derive key.
2003 """Influences cache key derivation by adjusting state to derive key.
2000
2004
2001 A dict defining the state used to derive the cache key is passed.
2005 A dict defining the state used to derive the cache key is passed.
2002
2006
2003 Implementations can modify this dict to record additional state that
2007 Implementations can modify this dict to record additional state that
2004 is wanted to influence key derivation.
2008 is wanted to influence key derivation.
2005
2009
2006 Implementations are *highly* encouraged to not modify or delete
2010 Implementations are *highly* encouraged to not modify or delete
2007 existing keys.
2011 existing keys.
2008 """
2012 """
2009
2013
2010 def setcachekey(key):
2014 def setcachekey(key):
2011 """Record the derived cache key for this request.
2015 """Record the derived cache key for this request.
2012
2016
2013 Instances may mutate the key for internal usage, as desired. e.g.
2017 Instances may mutate the key for internal usage, as desired. e.g.
2014 instances may wish to prepend the repo name, introduce path
2018 instances may wish to prepend the repo name, introduce path
2015 components for filesystem or URL addressing, etc. Behavior is up to
2019 components for filesystem or URL addressing, etc. Behavior is up to
2016 the cache.
2020 the cache.
2017
2021
2018 Returns a bool indicating if the request is cacheable by this
2022 Returns a bool indicating if the request is cacheable by this
2019 instance.
2023 instance.
2020 """
2024 """
2021
2025
2022 def lookup():
2026 def lookup():
2023 """Attempt to resolve an entry in the cache.
2027 """Attempt to resolve an entry in the cache.
2024
2028
2025 The instance is instructed to look for the cache key that it was
2029 The instance is instructed to look for the cache key that it was
2026 informed about via the call to ``setcachekey()``.
2030 informed about via the call to ``setcachekey()``.
2027
2031
2028 If there's no cache hit or the cacher doesn't wish to use the cached
2032 If there's no cache hit or the cacher doesn't wish to use the cached
2029 entry, ``None`` should be returned.
2033 entry, ``None`` should be returned.
2030
2034
2031 Else, a dict defining the cached result should be returned. The
2035 Else, a dict defining the cached result should be returned. The
2032 dict may have the following keys:
2036 dict may have the following keys:
2033
2037
2034 objs
2038 objs
2035 An iterable of objects that should be sent to the client. That
2039 An iterable of objects that should be sent to the client. That
2036 iterable of objects is expected to be what the command function
2040 iterable of objects is expected to be what the command function
2037 would return if invoked or an equivalent representation thereof.
2041 would return if invoked or an equivalent representation thereof.
2038 """
2042 """
2039
2043
2040 def onobject(obj):
2044 def onobject(obj):
2041 """Called when a new object is emitted from the command function.
2045 """Called when a new object is emitted from the command function.
2042
2046
2043 Receives as its argument the object that was emitted from the
2047 Receives as its argument the object that was emitted from the
2044 command function.
2048 command function.
2045
2049
2046 This method returns an iterator of objects to forward to the output
2050 This method returns an iterator of objects to forward to the output
2047 layer. The easiest implementation is a generator that just
2051 layer. The easiest implementation is a generator that just
2048 ``yield obj``.
2052 ``yield obj``.
2049 """
2053 """
2050
2054
2051 def onfinished():
2055 def onfinished():
2052 """Called after all objects have been emitted from the command function.
2056 """Called after all objects have been emitted from the command function.
2053
2057
2054 Implementations should return an iterator of objects to forward to
2058 Implementations should return an iterator of objects to forward to
2055 the output layer.
2059 the output layer.
2056
2060
2057 This method can be a generator.
2061 This method can be a generator.
2058 """
2062 """
@@ -1,3995 +1,4000 b''
# localrepo.py - read/write repository class for mercurial
# coding: utf-8
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.


import functools
import os
import random
import re
import sys
import time
import weakref

from concurrent import futures
from typing import (
    Optional,
)

from .i18n import _
from .node import (
    bin,
    hex,
    nullrev,
    sha1nodeconstants,
    short,
)
from .pycompat import (
    delattr,
    getattr,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    bundlecaches,
    changegroup,
    color,
    commit,
    context,
    dirstate,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    match as matchmod,
    mergestate as mergestatemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    rcutil,
    repoview,
    requirements as requirementsmod,
    revlog,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
    wireprototypes,
)

from .interfaces import (
    repository,
    util as interfaceutil,
)

from .utils import (
    hashutil,
    procutil,
    stringutil,
    urlutil,
)

from .revlogutils import (
    concurrency_checker as revlogchecker,
    constants as revlogconst,
    sidedata as sidedatamod,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

RE_SKIP_DIRSTATE_ROLLBACK = re.compile(
    b"^((dirstate|narrowspec.dirstate).*|branch$)"
)

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()


class _basefilecache(scmutil.filecache):
    """All filecache usage on repo is done for logic that should be unfiltered"""

    def __get__(self, repo, type=None):
        if repo is None:
            return self
        # proxy to unfiltered __dict__ since filtered repo has no entry
        unfi = repo.unfiltered()
        try:
            return unfi.__dict__[self.sname]
        except KeyError:
            pass
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        return super(_basefilecache, self).set(repo.unfiltered(), value)


class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""

    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)


class storecache(_basefilecache):
    """filecache for files in the store"""

    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b''))

    def join(self, obj, fname):
        return obj.sjoin(fname)


class changelogcache(storecache):
    """filecache for the changelog"""

    def __init__(self):
        super(changelogcache, self).__init__()
        _cachedfiles.add((b'00changelog.i', b''))
        _cachedfiles.add((b'00changelog.n', b''))

    def tracked_paths(self, obj):
        paths = [self.join(obj, b'00changelog.i')]
        if obj.store.opener.options.get(b'persistent-nodemap', False):
            paths.append(self.join(obj, b'00changelog.n'))
        return paths


class manifestlogcache(storecache):
    """filecache for the manifestlog"""

    def __init__(self):
        super(manifestlogcache, self).__init__()
        _cachedfiles.add((b'00manifest.i', b''))
        _cachedfiles.add((b'00manifest.n', b''))

    def tracked_paths(self, obj):
        paths = [self.join(obj, b'00manifest.i')]
        if obj.store.opener.options.get(b'persistent-nodemap', False):
            paths.append(self.join(obj, b'00manifest.n'))
        return paths


class mixedrepostorecache(_basefilecache):
    """filecache for a mix of files in .hg/store and outside"""

    def __init__(self, *pathsandlocations):
        # scmutil.filecache only uses the path for passing back into our
        # join(), so we can safely pass a list of paths and locations
        super(mixedrepostorecache, self).__init__(*pathsandlocations)
        _cachedfiles.update(pathsandlocations)

    def join(self, obj, fnameandlocation):
        fname, location = fnameandlocation
        if location == b'plain':
            return obj.vfs.join(fname)
        else:
            if location != b'':
                raise error.ProgrammingError(
                    b'unexpected location: %s' % location
                )
            return obj.sjoin(fname)
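

# Editor's illustrative sketch: how the descriptors above are typically
# applied on a repository class. The surrounding class is elided here; the
# ``_bookmarks`` property mirrors the usual usage pattern in this module.
#
#     class localrepository:
#         @repofilecache(b'bookmarks')
#         def _bookmarks(self):
#             # re-read .hg/bookmarks only when its stat info changes
#             return bookmarks.bmstore(self)
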
def isfilecached(repo, name):
    """check if a repo has already cached the "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True


class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)


class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())


def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered
    version"""

    @functools.wraps(orig)
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)

    return wrapper
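

# Editor's illustrative sketch: ``unfilteredmethod`` is meant to decorate
# repository methods that must ignore repoview filtering. Hypothetical
# example:
#
#     class somerepo:
#         @unfilteredmethod
#         def destroyed(self):
#             ...  # always runs against the unfiltered repository
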
moderncaps = {
    b'lookup',
    b'branchmap',
    b'pushkey',
    b'known',
    b'getbundle',
    b'unbundle',
}
legacycaps = moderncaps.union({b'changegroupsubset'})


@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor:
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after sendcommands()'
            )

        if self._closed:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after close()'
            )

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True
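

# Editor's illustrative sketch: the executor above is normally obtained from
# a peer and driven as a context manager; ``peer`` here is assumed to be a
# ``localpeer`` as defined below.
#
#     with peer.commandexecutor() as e:
#         f = e.callcommand(b'heads', {})
#         e.sendcommands()
#         heads = f.result()  # already-resolved future for the local call
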
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None, path=None):
        super(localpeer, self).__init__(repo.ui, path=path)

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered(b'served')

        if repo._wanted_sidedata:
            formatted = bundle2.format_remote_wanted_sidedata(repo)
            caps.add(b'exp-wanted-sidedata=' + formatted)

        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def getbundle(
        self,
        source,
        heads=None,
        common=None,
        bundlecaps=None,
        remote_sidedata=None,
        **kwargs
    ):
        chunks = exchange.getbundlechunks(
            self._repo,
            source,
            heads=heads,
            common=common,
            bundlecaps=bundlecaps,
            remote_sidedata=remote_sidedata,
            **kwargs
        )[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make
            # the wire level function happier. We need to build a proper
            # object from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler(b'01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_(b'cannot perform stream clone against local peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                if util.safehasattr(ret, b'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(
                _(b'push failed:'), stringutil.forcebytestr(exc)
            )

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.


@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    """peer extension which implements legacy methods too; used for tests with
    restricted capabilities"""

    def __init__(self, repo, path=None):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps, path=path)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=bases, ancestorsof=heads
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    # End of baselegacywirecommands interface.


# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()
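

# Editor's illustrative sketch: how an extension might register one of these
# functions. The requirement name is hypothetical; ``featuresetup`` must be
# defined in a loaded extension module for it to be called.
#
#     def featuresetup(ui, features):
#         features.add(b'exp-some-requirement')
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)
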
def _getsharedvfs(hgvfs, requirements):
    """returns the vfs object pointing to the root of the shared source
    repo for a shared repository

    hgvfs is a vfs pointing at .hg/ of the current repo (the shared one)
    requirements is the set of requirements of the current repo (the shared
    one)
    """
    # The ``shared`` or ``relshared`` requirements indicate the
    # store lives in the path contained in the ``.hg/sharedpath`` file.
    # This is an absolute path for ``shared`` and relative to
    # ``.hg/`` for ``relshared``.
    sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
    if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
        sharedpath = util.normpath(hgvfs.join(sharedpath))

    sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

    if not sharedvfs.exists():
        raise error.RepoError(
            _(b'.hg/sharedpath points to nonexistent directory %s')
            % sharedvfs.base
        )
    return sharedvfs
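

# Editor's illustrative example (hypothetical paths): for a share created
# with ``hg share /home/user/main work``, ``.hg/sharedpath`` would contain
# ``/home/user/main/.hg``; with ``hg share --relative``, it would contain a
# path relative to ``.hg/`` such as ``../../main/.hg``, which is why the
# ``relshared`` requirement triggers the normpath/join above.
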
def _readrequires(vfs, allowmissing):
    """reads the requires file present at the root of this vfs
    and returns a set of requirements

    If allowmissing is True, we suppress FileNotFoundError if raised"""
    # requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    read = vfs.tryread if allowmissing else vfs.read
    return set(read(b'requires').splitlines())
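

# Editor's illustrative example: a typical ``.hg/requires`` file is a
# newline-delimited list such as:
#
#     dotencode
#     fncache
#     generaldelta
#     revlogv1
#     store
#
# for which _readrequires() returns the corresponding set of byte strings.
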
def makelocalrepository(baseui, path: bytes, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository
    type creation. Note that an extension's wrapped function may be called
    even if that extension is not loaded for the repo being constructed.
    Extensions should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
    # Whether this repository is a shared one or not
    shared = False
    # If this repository is shared, the vfs pointing to the shared repo
    sharedvfs = None

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except FileNotFoundError:
            pass
        except ValueError as e:
            # Can be raised on Python 3.8 when path is invalid.
            raise error.Abort(
                _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
            )

        raise error.RepoError(_(b'repository %s not found') % path)

    requirements = _readrequires(hgvfs, True)
    shared = (
        requirementsmod.SHARED_REQUIREMENT in requirements
        or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
    )
    storevfs = None
    if shared:
        # This is a shared repo
        sharedvfs = _getsharedvfs(hgvfs, requirements)
        storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
    else:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'))

    # if .hg/requires contains the sharesafe requirement, it means
    # there exists a `.hg/store/requires` too and we should read it
    # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store
    # requirement is present. We never write SHARESAFE_REQUIREMENT for a repo
    # if the store is not present; refer to checkrequirementscompat() for that
    #
    # However, if SHARESAFE_REQUIREMENT is not present, it means that the
    # repository was shared the old way. We check the share source
    # .hg/requires for SHARESAFE_REQUIREMENT to detect whether the current
    # repository needs to be reshared
    hint = _(b"see `hg help config.format.use-share-safe` for more information")
618 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
618 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
619 if (
619 if (
620 shared
620 shared
621 and requirementsmod.SHARESAFE_REQUIREMENT
621 and requirementsmod.SHARESAFE_REQUIREMENT
622 not in _readrequires(sharedvfs, True)
622 not in _readrequires(sharedvfs, True)
623 ):
623 ):
624 mismatch_warn = ui.configbool(
624 mismatch_warn = ui.configbool(
625 b'share', b'safe-mismatch.source-not-safe.warn'
625 b'share', b'safe-mismatch.source-not-safe.warn'
626 )
626 )
627 mismatch_config = ui.config(
627 mismatch_config = ui.config(
628 b'share', b'safe-mismatch.source-not-safe'
628 b'share', b'safe-mismatch.source-not-safe'
629 )
629 )
630 mismatch_verbose_upgrade = ui.configbool(
630 mismatch_verbose_upgrade = ui.configbool(
631 b'share', b'safe-mismatch.source-not-safe:verbose-upgrade'
631 b'share', b'safe-mismatch.source-not-safe:verbose-upgrade'
632 )
632 )
633 if mismatch_config in (
633 if mismatch_config in (
634 b'downgrade-allow',
634 b'downgrade-allow',
635 b'allow',
635 b'allow',
636 b'downgrade-abort',
636 b'downgrade-abort',
637 ):
637 ):
638 # prevent cyclic import localrepo -> upgrade -> localrepo
638 # prevent cyclic import localrepo -> upgrade -> localrepo
639 from . import upgrade
639 from . import upgrade
640
640
641 upgrade.downgrade_share_to_non_safe(
641 upgrade.downgrade_share_to_non_safe(
642 ui,
642 ui,
643 hgvfs,
643 hgvfs,
644 sharedvfs,
644 sharedvfs,
645 requirements,
645 requirements,
646 mismatch_config,
646 mismatch_config,
647 mismatch_warn,
647 mismatch_warn,
648 mismatch_verbose_upgrade,
648 mismatch_verbose_upgrade,
649 )
649 )
650 elif mismatch_config == b'abort':
650 elif mismatch_config == b'abort':
651 raise error.Abort(
651 raise error.Abort(
652 _(b"share source does not support share-safe requirement"),
652 _(b"share source does not support share-safe requirement"),
653 hint=hint,
653 hint=hint,
654 )
654 )
655 else:
655 else:
656 raise error.Abort(
656 raise error.Abort(
657 _(
657 _(
658 b"share-safe mismatch with source.\nUnrecognized"
658 b"share-safe mismatch with source.\nUnrecognized"
659 b" value '%s' of `share.safe-mismatch.source-not-safe`"
659 b" value '%s' of `share.safe-mismatch.source-not-safe`"
660 b" set."
660 b" set."
661 )
661 )
662 % mismatch_config,
662 % mismatch_config,
663 hint=hint,
663 hint=hint,
664 )
664 )
665 else:
665 else:
666 requirements |= _readrequires(storevfs, False)
666 requirements |= _readrequires(storevfs, False)
    elif shared:
        sourcerequires = _readrequires(sharedvfs, False)
        if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
            mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
            mismatch_warn = ui.configbool(
                b'share', b'safe-mismatch.source-safe.warn'
            )
            mismatch_verbose_upgrade = ui.configbool(
                b'share', b'safe-mismatch.source-safe:verbose-upgrade'
            )
            if mismatch_config in (
                b'upgrade-allow',
                b'allow',
                b'upgrade-abort',
            ):
                # prevent cyclic import localrepo -> upgrade -> localrepo
                from . import upgrade

                upgrade.upgrade_share_to_safe(
                    ui,
                    hgvfs,
                    storevfs,
                    requirements,
                    mismatch_config,
                    mismatch_warn,
                    mismatch_verbose_upgrade,
                )
            elif mismatch_config == b'abort':
                raise error.Abort(
                    _(
                        b'version mismatch: source uses share-safe'
                        b' functionality while the current share does not'
                    ),
                    hint=hint,
                )
            else:
                raise error.Abort(
                    _(
                        b"share-safe mismatch with source.\nUnrecognized"
                        b" value '%s' of `share.safe-mismatch.source-safe` set."
                    )
                    % mismatch_config,
                    hint=hint,
                )
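
    # For reference, these mismatch policies are plain config knobs. An
    # hgrc opting into automatic conversion could contain (illustrative
    # values; both settings accept the choices handled above):
    #
    #   [share]
    #   safe-mismatch.source-safe = upgrade-allow
    #   safe-mismatch.source-not-safe = downgrade-allow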

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. If the `shared` or
    # `relshared` requirements are present, the current repository is a
    # share and the store exists in the path mentioned in `.hg/sharedpath`.
    if shared:
        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
        features.add(repository.REPO_FEATURE_SHARED_STORAGE)
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(
        requirements,
        storebasepath,
        lambda base: vfsmod.vfs(base, cacheaudited=True),
    )
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    if (
        requirementsmod.REVLOGV2_REQUIREMENT in requirements
        or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
    ):
        features.add(repository.REPO_FEATURE_SIDE_DATA)
        # the revlogv2 docket introduced a race condition that we need to fix
        features.discard(repository.REPO_FEATURE_STREAM_CLONE)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The wcache vfs is used to manage cache files related to the working copy
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(
            ui=ui,
            intents=intents,
            requirements=requirements,
            features=features,
            wdirvfs=wdirvfs,
            hgvfs=hgvfs,
            store=store,
            storevfs=storevfs,
            storeoptions=storevfs.options,
            cachevfs=cachevfs,
            wcachevfs=wcachevfs,
            extensionmodulenames=extensionmodulenames,
            extrastate=extrastate,
            baseclasses=bases,
        )

        if not isinstance(typ, type):
            raise error.ProgrammingError(
                b'unable to construct type for %s' % iface
            )

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(
        b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
    )

    cls = type(name, tuple(bases), {})
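
    # For example, a repository at /src/repo with requirements
    # {b'revlogv1', b'store'} would produce a class named
    # "derivedrepo:/src/repo<revlogv1,store>" (illustrative path and values).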

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents,
    )


def loadhgrc(
    ui,
    wdirvfs: vfsmod.vfs,
    hgvfs: vfsmod.vfs,
    requirements,
    sharedvfs: Optional[vfsmod.vfs] = None,
):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.

    sharedvfs is a vfs object pointing to the source repo if the current
    one is a shared one.
    """
    if not rcutil.use_repo_hgrc():
        return False

    ret = False
    # first load config from the shared source if we have to
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
        try:
            ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
            ret = True
        except IOError:
            pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    return ret
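
# An extension wishing to pull configs from another source could wrap this
# function. A minimal sketch, assuming the extension already imports
# ``extensions`` and ``localrepo`` (the wrapper name and the extra
# ``hgrc-extra`` file are hypothetical):
#
#   def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
#       ret = orig(ui, wdirvfs, hgvfs, requirements, sharedvfs)
#       try:
#           ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
#           ret = True
#       except IOError:
#           pass
#       return ret
#
#   extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)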


def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # requirement is present.
    autoextensions = {
        b'git': [b'git'],
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source=b'autoload')
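
    # For example, a repository whose .hg/requires lists b'lfs' gets the
    # lfs extension enabled here (with source b'autoload') unless the user
    # already configured `extensions.lfs` explicitly.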


def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)

    return supported

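# Extensions extend the supported set by registering a callback in
# ``featuresetupfuncs``. A minimal sketch, assuming a hypothetical
# b'exp-myfeature' requirement provided by the extension:
#
#   def featuresetup(ui, supported):
#       supported |= {b'exp-myfeature'}
#
#   def uisetup(ui):
#       localrepo.featuresetupfuncs.add(featuresetup)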

def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(missing)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )


def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if (
        requirementsmod.SPARSE_REQUIREMENT in requirements
        and not sparse.enabled
    ):
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extension to access'
            )
        )


def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if requirementsmod.STORE_REQUIREMENT in requirements:
        if requirementsmod.FNCACHE_REQUIREMENT in requirements:
            dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
            return storemod.fncachestore(path, vfstype, dotencode)

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)
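
# Rough summary of the dispatch above (requirement set -> store class):
#   store + fncache -> fncachestore (dotencode selects the newer encoding)
#   store only      -> encodedstore
#   neither         -> basicstore (legacy layout)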


def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if (
        requirementsmod.REVLOGV1_REQUIREMENT in requirements
        or requirementsmod.REVLOGV2_REQUIREMENT in requirements
    ):
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:  # explicitly mark repo as using revlogv0
        options[b'revlogv0'] = True

    if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
        options[b'copies-storage'] = b'changeset-sidedata'
    else:
        writecopiesto = ui.config(b'experimental', b'copies.write-to')
        copiesextramode = (b'changeset-only', b'compatibility')
        if writecopiesto in copiesextramode:
            options[b'copies-storage'] = b'extra'

    return options
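
# For a typical modern repository the resulting dict carries entries such
# as b'revlogv1', b'generaldelta' and b'copies-storage' (a sketch; the
# exact keys depend on the requirements and config handled above).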


def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
        options[b'revlogv1'] = True
    if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True
    if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
        options[b'changelogv2'] = True
        cmp_rank = ui.configbool(b'experimental', b'changelog-v2.compute-rank')
        options[b'changelogv2.compute-rank'] = cmp_rank

    if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    options[b'deltabothparents'] = deltabothparents
    dps_cgds = ui.configint(
        b'storage',
        b'revlog.delta-parent-search.candidate-group-chunk-size',
    )
    options[b'delta-parent-search.candidate-group-chunk-size'] = dps_cgds
    options[b'debug-delta'] = ui.configbool(b'debug', b'revlog.debug-delta')

    issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
    options[b'issue6528.fix-incoming'] = issue6528

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

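    # ``configbytes`` understands human-friendly sizes, so e.g. an hgrc with
    #
    #   [experimental]
    #   maxdeltachainspan = 4M
    #
    # would land here as 4194304 (illustrative value).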
    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # we allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one"
        prefix = r.startswith
        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
            options[b'compengine'] = r.split(b'-', 2)[2]
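    # e.g. a requirement of b'revlog-compression-zstd' splits into
    # [b'revlog', b'compression', b'zstd'], so b'zstd' becomes the engine.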

    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    if ui.configbool(b'experimental', b'rust.index'):
        options[b'rust.index'] = True
    if requirementsmod.NODEMAP_REQUIREMENT in requirements:
        slow_path = ui.config(
            b'storage', b'revlog.persistent-nodemap.slow-path'
        )
        if slow_path not in (b'allow', b'warn', b'abort'):
            default = ui.config_default(
                b'storage', b'revlog.persistent-nodemap.slow-path'
            )
            msg = _(
                b'unknown value for config '
                b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
            )
            ui.warn(msg % slow_path)
            if not ui.quiet:
                ui.warn(_(b'falling back to default value: %s\n') % default)
            slow_path = default

        msg = _(
            b"accessing `persistent-nodemap` repository without associated "
            b"fast implementation."
        )
        hint = _(
            b"check `hg help config.format.use-persistent-nodemap` "
            b"for details"
        )
        if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
            if slow_path == b'warn':
                msg = b"warning: " + msg + b'\n'
                ui.warn(msg)
                if not ui.quiet:
                    hint = b'(' + hint + b')\n'
                    ui.warn(hint)
            if slow_path == b'abort':
                raise error.Abort(msg, hint=hint)
        options[b'persistent-nodemap'] = True
    if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
        slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
        if slow_path not in (b'allow', b'warn', b'abort'):
            default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
            msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
            ui.warn(msg % slow_path)
            if not ui.quiet:
                ui.warn(_(b'falling back to default value: %s\n') % default)
            slow_path = default

        msg = _(
            b"accessing `dirstate-v2` repository without associated "
            b"fast implementation."
        )
        hint = _(
            b"check `hg help config.format.use-dirstate-v2` " b"for details"
        )
        if not dirstate.HAS_FAST_DIRSTATE_V2:
            if slow_path == b'warn':
                msg = b"warning: " + msg + b'\n'
                ui.warn(msg)
                if not ui.quiet:
                    hint = b'(' + hint + b')\n'
                    ui.warn(hint)
            if slow_path == b'abort':
                raise error.Abort(msg, hint=hint)
    if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
        options[b'persistent-nodemap.mmap'] = True
    if ui.configbool(b'devel', b'persistent-nodemap'):
        options[b'devel-force-nodemap'] = True

    return options


def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage:
    """File storage when using revlogs."""

    def file(self, path):
        if path.startswith(b'/'):
            path = path[1:]

        return filelog.filelog(self.svfs, path)
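
    # For instance, repo.file(b'/foo/bar.txt') and repo.file(b'foo/bar.txt')
    # address the same filelog: the leading slash is stripped before the
    # filelog is opened on the store vfs (illustrative path).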


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage:
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path.startswith(b'/'):
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage


# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]
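
# Because the factories are resolved through these lambdas at call time, an
# extension can substitute its own storage type by wrapping the module-level
# function. A sketch (the requirement and class names are hypothetical):
#
#   def _makefilestorage(orig, requirements, features, **kwargs):
#       if b'exp-myfeature' in requirements:
#           return mycustomfilestorage
#       return orig(requirements, features, **kwargs)
#
#   extensions.wrapfunction(localrepo, 'makefilestorage', _makefilestorage)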


@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository:
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    _basesupported = {
        requirementsmod.ARCHIVED_PHASE_REQUIREMENT,
        requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
        requirementsmod.CHANGELOGV2_REQUIREMENT,
        requirementsmod.COPIESSDC_REQUIREMENT,
        requirementsmod.DIRSTATE_TRACKED_HINT_V1,
        requirementsmod.DIRSTATE_V2_REQUIREMENT,
        requirementsmod.DOTENCODE_REQUIREMENT,
        requirementsmod.FNCACHE_REQUIREMENT,
        requirementsmod.GENERALDELTA_REQUIREMENT,
        requirementsmod.INTERNAL_PHASE_REQUIREMENT,
        requirementsmod.NODEMAP_REQUIREMENT,
        requirementsmod.RELATIVE_SHARED_REQUIREMENT,
        requirementsmod.REVLOGV1_REQUIREMENT,
        requirementsmod.REVLOGV2_REQUIREMENT,
        requirementsmod.SHARED_REQUIREMENT,
        requirementsmod.SHARESAFE_REQUIREMENT,
        requirementsmod.SPARSE_REQUIREMENT,
        requirementsmod.SPARSEREVLOG_REQUIREMENT,
        requirementsmod.STORE_REQUIREMENT,
        requirementsmod.TREEMANIFEST_REQUIREMENT,
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }

    def __init__(
        self,
        baseui,
        ui,
        origroot: bytes,
        wdirvfs: vfsmod.vfs,
        hgvfs: vfsmod.vfs,
        requirements,
        supportedrequirements,
        sharedpath: bytes,
        store,
        cachevfs: vfsmod.vfs,
        wcachevfs: vfsmod.vfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` defining the path to the storage base directory. Points
           to a ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.nodeconstants = sha1nodeconstants
        self.nullid = self.nodeconstants.nullid
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        self._dirstate = None
        # post-dirstate-status hooks
        self._postdsstatus = []

        self._pending_narrow_pats = None
        self._pending_narrow_pats_dirstate = None

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'

        self._wanted_sidedata = set()
        self._sidedata_computers = {}
        sidedatamod.set_sidedata_spec_for_repo(self)

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not util.safehasattr(repo, b'_wlockref')
                or not util.safehasattr(repo, b'_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            # path prefixes covered by 'lock'
            vfs_path_prefixes = (
                b'journal.',
                b'undo.',
                b'strip-backup/',
                b'cache/',
            )
            if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs
1545
1545
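    # A hedged sketch of what the ward catches (``repo`` stands for any
    # localrepository; the path is illustrative). With the devel warnings
    # enabled, a write through ``repo.vfs`` outside the matching lock is
    # reported instead of passing silently:
    #
    #     repo.vfs.write(b'journal.foo', b'...')
    #     # -> devel-warn: write with no lock: "journal.foo"
    #
    # Reads (mode None, 'r', 'rb') and paths listed in
    # ``self._wlockfreeprefix`` are deliberately left unchecked.
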
    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, b'_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs

    @property
    def vfs_map(self):
        return {
            b'': self.svfs,
            b'plain': self.vfs,
            b'store': self.svfs,
        }

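    # ``vfs_map`` exists so code handling transaction/undo files can map the
    # location prefix recorded with each entry back to a vfs. A hedged
    # sketch (``entries`` being hypothetical ``(location, path)`` pairs):
    #
    #     vfs_map = repo.vfs_map
    #     for location, path in entries:
    #         vfs_map[location].unlink(path)  # b'' / b'store' -> svfs,
    #                                         # b'plain'       -> vfs
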
    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        if self.ui.configbool(b'experimental', b'narrow'):
            caps.add(wireprototypes.NARROWCAP)
        return caps

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False

    def peer(self, path=None):
        return localpeer(self, path=path)  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

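    # For instance (sketch), asking for views in sequence never stacks
    # filters:
    #
    #     served = repo.filtered(b'served')
    #     visible = served.filtered(b'visible')
    #     # ``visible`` uses the "visible" view only, not "visible" on top
    #     # of "served"
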
    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) outside transaction update changelog to content B
        # 3) outside transaction update bookmark file referring to content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are dropped.
        #
        # Having this happen during read is not great, but it becomes worse
        # when it happens during write, because the bookmarks to the "unknown"
        # nodes will be dropped for good. However, writes happen within locks.
        # This locking makes it possible to have a race-free consistent read.
        # For this purpose, data read from disk before locking is
        # "invalidated" right after the locks are taken. These invalidations
        # are "light"; the `filecache` mechanism keeps the data in memory and
        # will reuse it if the underlying files did not change. Not parsing
        # the same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by the
        # bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would be
        # refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not
        # enough, because at the time we build the content for `_bookmarks` in
        # (4), the changelog file has already diverged from the content used
        # for loading `changelog` in (1)
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still happen
        # without the lock (with a narrower window), but it would no longer go
        # undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) We force `changelog` filecache to be tested
        # 4) cachestat for `changelog` are captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as the
        # cache stat computed in (1). As a result, at locking time:
        #  * if the changelog did not change since (1) -> we can reuse the data
        #  * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)

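    # In practice the schedule gives callers this pattern (sketch):
    #
    #     marks = repo._bookmarks          # steps (1)-(5), cached
    #     with repo.wlock(), repo.lock():
    #         # lock acquisition "lightly" invalidated the filecache
    #         # entries; if the files did not change, the same object is
    #         # reused here
    #         marks = repo._bookmarks
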
    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if 'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. what we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @changelogcache()
    def changelog(repo):
        # load dirstate before changelog to avoid race (see issue6303)
        repo.dirstate.prefetch_parents()
        return repo.store.changelog(
            txnutil.mayhavepending(repo.root),
            concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
        )

    @manifestlogcache()
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @unfilteredpropertycache
    def dirstate(self):
        if self._dirstate is None:
            self._dirstate = self._makedirstate()
        else:
            self._dirstate.refresh()
        return self._dirstate

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = None
        if sparse.use_sparse(self):
            sparsematchfn = lambda: sparse.matcher(self)
        v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
        th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
        use_dirstate_v2 = v2_req in self.requirements
        use_tracked_hint = th in self.requirements

        return dirstate.dirstate(
            self.vfs,
            self.ui,
            self.root,
            self._dirstatevalidate,
            sparsematchfn,
            self.nodeconstants,
            use_dirstate_v2,
            use_tracked_hint=use_tracked_hint,
        )

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return self.nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        # the narrow management should probably move into its own object
        val = self._pending_narrow_pats
        if val is None:
            val = narrowspec.load(self)
        return val

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch

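    # Usage sketch (the pattern is illustrative):
    #
    #     m = matchmod.match(repo.root, b'', [b'glob:src/**'])
    #     nm = repo.narrowmatch(m, includeexact=True)
    #     # ``nm`` accepts paths matched by both ``m`` and the narrowspec,
    #     # plus the exact files listed in ``m``
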
    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, self.nodeconstants.nullid),
            nullrev: (nullrev, self.nodeconstants.nullid),
            self.nullid: (nullrev, self.nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast path access to the working copy parents
        # however, only do it for filters that ensure wc is visible.
        quick = self._quick_access_changeid_null.copy()
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == self.nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                #   skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != self.nullid:
            quick[b'.'] = quick[p1node]
        return quick

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains a list of symbols we can recognise right away without
        further processing.
        """
        if self.filtername in repoview.filter_has_wc:
            return self._quick_access_changeid_wc
        return self._quick_access_changeid_null

    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in range(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == self.nodeconstants.nodelen:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 2 * self.nodeconstants.nodelen:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)

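    # The accepted ``changeid`` forms, mirroring the branches above
    # (sketch; ``node`` is a binary nodeid of ``nodelen`` bytes):
    #
    #     repo[None]    # working context
    #     repo[b'.']    # first parent of the working directory
    #     repo[b'tip']  # tip of the (filtered) changelog
    #     repo[42]      # a revision number
    #     repo[node]    # a binary node
    #     repo[0:3]     # a slice, yielding a list of changectx
    #
    # A bytestring of 2 * nodelen is parsed as a hex node via ``bin()``.
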
    def __contains__(self, changeid):
        """True if the given changeid exists"""
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr: bytes, *args):
        """Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        """
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)

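    # %-formatting sketch (see ``revsetlang.formatspec`` for the full list
    # of escapes; ``node`` and ``revs`` are hypothetical values):
    #
    #     repo.revs(b'ancestors(%n)', node)     # %n: a binary node
    #     repo.revs(b'%ld and public()', revs)  # %ld: a list of revisions
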
    def set(self, expr: bytes, *args):
        """Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        """
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs: bytes, user=False, localalias=None):
        """Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        """
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

    def url(self) -> bytes:
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        """Returns a tagscache object that contains various tags-related
        caches."""

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache:
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in tags.items():
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        """Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object."""

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for name, (node, hist) in alltags.items():
            if node != self.nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value for (name, value) in tagtypes.items()
        }
        return (tags, tagtypes)

    def tagtype(self, tagname):
        """
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        """

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().items():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.items():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.values():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        """returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number"""
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def register_changeset(self, rev, changelogrevision):
        self.revbranchcache().setdata(rev, changelogrevision)

    def branchtip(self, branch, ignoremissing=False):
        """return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        """
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f: bytes, *insidef: bytes) -> bytes:
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=None):
        if p2 is None:
            p2 = self.nullid
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path: bytes, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self) -> bytes:
        return self.dirstate.getcwd()

    def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes:
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.items():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

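    # The patterns come from the hgrc section named by ``filter``
    # (b'encode' or b'decode'); a hedged configuration sketch:
    #
    #     [encode]
    #     **.dat = !                # b'!' disables filtering for this pattern
    #     **.txt = myfilter: args   # ``myfilter`` is a hypothetical name
    #                               # registered via adddatafilter()
    #
    # Any other command is treated as a shell filter and piped through
    # ``procutil.filter``.
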
2306 def _filter(self, filterpats, filename, data):
2314 def _filter(self, filterpats, filename, data):
2307 for mf, fn, cmd in filterpats:
2315 for mf, fn, cmd in filterpats:
2308 if mf(filename):
2316 if mf(filename):
2309 self.ui.debug(
2317 self.ui.debug(
2310 b"filtering %s through %s\n"
2318 b"filtering %s through %s\n"
2311 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2319 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2312 )
2320 )
2313 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2321 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2314 break
2322 break
2315
2323
2316 return data
2324 return data
2317
2325
2318 @unfilteredpropertycache
2326 @unfilteredpropertycache
2319 def _encodefilterpats(self):
2327 def _encodefilterpats(self):
2320 return self._loadfilter(b'encode')
2328 return self._loadfilter(b'encode')
2321
2329
2322 @unfilteredpropertycache
2330 @unfilteredpropertycache
2323 def _decodefilterpats(self):
2331 def _decodefilterpats(self):
2324 return self._loadfilter(b'decode')
2332 return self._loadfilter(b'decode')
2325
2333
2326 def adddatafilter(self, name, filter):
2334 def adddatafilter(self, name, filter):
2327 self._datafilters[name] = filter
2335 self._datafilters[name] = filter
2328
2336
2329 def wread(self, filename: bytes) -> bytes:
2337 def wread(self, filename: bytes) -> bytes:
2330 if self.wvfs.islink(filename):
2338 if self.wvfs.islink(filename):
2331 data = self.wvfs.readlink(filename)
2339 data = self.wvfs.readlink(filename)
2332 else:
2340 else:
2333 data = self.wvfs.read(filename)
2341 data = self.wvfs.read(filename)
2334 return self._filter(self._encodefilterpats, filename, data)
2342 return self._filter(self._encodefilterpats, filename, data)
2335
2343
2336 def wwrite(
2344 def wwrite(
2337 self,
2345 self,
2338 filename: bytes,
2346 filename: bytes,
2339 data: bytes,
2347 data: bytes,
2340 flags: bytes,
2348 flags: bytes,
2341 backgroundclose=False,
2349 backgroundclose=False,
2342 **kwargs
2350 **kwargs
2343 ) -> int:
2351 ) -> int:
2344 """write ``data`` into ``filename`` in the working directory
2352 """write ``data`` into ``filename`` in the working directory
2345
2353
2346 This returns the length of the written (maybe decoded) data.
2354 This returns the length of the written (maybe decoded) data.
2347 """
2355 """
2348 data = self._filter(self._decodefilterpats, filename, data)
2356 data = self._filter(self._decodefilterpats, filename, data)
2349 if b'l' in flags:
2357 if b'l' in flags:
2350 self.wvfs.symlink(data, filename)
2358 self.wvfs.symlink(data, filename)
2351 else:
2359 else:
2352 self.wvfs.write(
2360 self.wvfs.write(
2353 filename, data, backgroundclose=backgroundclose, **kwargs
2361 filename, data, backgroundclose=backgroundclose, **kwargs
2354 )
2362 )
2355 if b'x' in flags:
2363 if b'x' in flags:
2356 self.wvfs.setflags(filename, False, True)
2364 self.wvfs.setflags(filename, False, True)
2357 else:
2365 else:
2358 self.wvfs.setflags(filename, False, False)
2366 self.wvfs.setflags(filename, False, False)
2359 return len(data)
2367 return len(data)
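# Illustrative calls exercising the flag handling above (b'x' marks the
# file executable, b'l' writes a symlink, b'' a plain file):
#
#     repo.wwrite(b'bin/run.sh', script_data, flags=b'x')
#     repo.wwrite(b'current', b'bin/run.sh', flags=b'l')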
2360
2368
2361 def wwritedata(self, filename: bytes, data: bytes) -> bytes:
2369 def wwritedata(self, filename: bytes, data: bytes) -> bytes:
2362 return self._filter(self._decodefilterpats, filename, data)
2370 return self._filter(self._decodefilterpats, filename, data)
2363
2371
2364 def currenttransaction(self):
2372 def currenttransaction(self):
2365 """return the current transaction or None if non exists"""
2373 """return the current transaction or None if non exists"""
2366 if self._transref:
2374 if self._transref:
2367 tr = self._transref()
2375 tr = self._transref()
2368 else:
2376 else:
2369 tr = None
2377 tr = None
2370
2378
2371 if tr and tr.running():
2379 if tr and tr.running():
2372 return tr
2380 return tr
2373 return None
2381 return None
2374
2382
2375 def transaction(self, desc, report=None):
2383 def transaction(self, desc, report=None):
2376 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2384 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2377 b'devel', b'check-locks'
2385 b'devel', b'check-locks'
2378 ):
2386 ):
2379 if self._currentlock(self._lockref) is None:
2387 if self._currentlock(self._lockref) is None:
2380 raise error.ProgrammingError(b'transaction requires locking')
2388 raise error.ProgrammingError(b'transaction requires locking')
2381 tr = self.currenttransaction()
2389 tr = self.currenttransaction()
2382 if tr is not None:
2390 if tr is not None:
2383 return tr.nest(name=desc)
2391 return tr.nest(name=desc)
2384
2392
2385 # abort here if the journal already exists
2393 # abort here if the journal already exists
2386 if self.svfs.exists(b"journal"):
2394 if self.svfs.exists(b"journal"):
2387 raise error.RepoError(
2395 raise error.RepoError(
2388 _(b"abandoned transaction found"),
2396 _(b"abandoned transaction found"),
2389 hint=_(b"run 'hg recover' to clean up transaction"),
2397 hint=_(b"run 'hg recover' to clean up transaction"),
2390 )
2398 )
2391
2399
2392 # At that point your dirstate should be clean:
2400 # At that point your dirstate should be clean:
2393 #
2401 #
2394 # - If you don't have the wlock, why would you still have a dirty
2402 # - If you don't have the wlock, why would you still have a dirty
2395 # dirstate?
2403 # dirstate?
2396 #
2404 #
2397 # - If you hold the wlock, you should not be opening a transaction in
2405 # - If you hold the wlock, you should not be opening a transaction in
2398 # the middle of a `dirstate.changing_*` block. The transaction needs to
2406 # the middle of a `dirstate.changing_*` block. The transaction needs to
2399 # be open before that and wrap the change-context.
2407 # be open before that and wrap the change-context.
2400 #
2408 #
2401 # - If you are not within a `dirstate.changing_*` context, why is our
2409 # - If you are not within a `dirstate.changing_*` context, why is our
2402 # dirstate dirty?
2410 # dirstate dirty?
2403 if self.dirstate._dirty:
2411 if self.dirstate._dirty:
2404 m = "cannot open a transaction with a dirty dirstate"
2412 m = "cannot open a transaction with a dirty dirstate"
2405 raise error.ProgrammingError(m)
2413 raise error.ProgrammingError(m)
2406
2414
2407 idbase = b"%.40f#%f" % (random.random(), time.time())
2415 idbase = b"%.40f#%f" % (random.random(), time.time())
2408 ha = hex(hashutil.sha1(idbase).digest())
2416 ha = hex(hashutil.sha1(idbase).digest())
2409 txnid = b'TXN:' + ha
2417 txnid = b'TXN:' + ha
2410 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2418 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2411
2419
2412 self._writejournal(desc)
2420 self._writejournal(desc)
2413 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2421 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2414 if report:
2422 if report:
2415 rp = report
2423 rp = report
2416 else:
2424 else:
2417 rp = self.ui.warn
2425 rp = self.ui.warn
2418 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2426 vfsmap = self.vfs_map
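# A rough sketch of the factored-out vfs_map property (its exact
# definition lives elsewhere in this module; the keys shown are the
# ones the old inline maps used):
#
#     @property
#     def vfs_map(self):
#         return {
#             b'': self.svfs,
#             b'plain': self.vfs,
#             b'store': self.svfs,
#         }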
2419 # we must avoid cyclic reference between repo and transaction.
2427 # we must avoid cyclic reference between repo and transaction.
2420 reporef = weakref.ref(self)
2428 reporef = weakref.ref(self)
2421 # Code to track tag movement
2429 # Code to track tag movement
2422 #
2430 #
2423 # Since tags are all handled as file content, it is actually quite hard
2431 # Since tags are all handled as file content, it is actually quite hard
2424 # to track these movements from a code perspective. So we fall back to
2432 # to track these movements from a code perspective. So we fall back to
2425 # tracking at the repository level. One could envision tracking changes
2433 # tracking at the repository level. One could envision tracking changes
2426 # to the '.hgtags' file through changegroup apply, but that fails to
2434 # to the '.hgtags' file through changegroup apply, but that fails to
2427 # cope with cases where a transaction exposes new heads without a
2435 # cope with cases where a transaction exposes new heads without a
2428 # changegroup being involved (eg: phase movement).
2436 # changegroup being involved (eg: phase movement).
2429 #
2437 #
2430 # For now, we gate the feature behind a flag since this likely comes
2438 # For now, we gate the feature behind a flag since this likely comes
2431 # with performance impacts. The current code runs more often than needed
2439 # with performance impacts. The current code runs more often than needed
2432 # and does not use caches as much as it could. The current focus is on
2440 # and does not use caches as much as it could. The current focus is on
2433 # the behavior of the feature so we disable it by default. The flag
2441 # the behavior of the feature so we disable it by default. The flag
2434 # will be removed when we are happy with the performance impact.
2442 # will be removed when we are happy with the performance impact.
2435 #
2443 #
2436 # Once this feature is no longer experimental move the following
2444 # Once this feature is no longer experimental move the following
2437 # documentation to the appropriate help section:
2445 # documentation to the appropriate help section:
2438 #
2446 #
2439 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2447 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2440 # tags (new or changed or deleted tags). In addition the details of
2448 # tags (new or changed or deleted tags). In addition the details of
2441 # these changes are made available in a file at:
2449 # these changes are made available in a file at:
2442 # ``REPOROOT/.hg/changes/tags.changes``.
2450 # ``REPOROOT/.hg/changes/tags.changes``.
2443 # Make sure you check for HG_TAG_MOVED before reading that file as it
2451 # Make sure you check for HG_TAG_MOVED before reading that file as it
2444 # might exist from a previous transaction even if no tags were touched
2452 # might exist from a previous transaction even if no tags were touched
2445 # in this one. Changes are recorded in a line-based format::
2453 # in this one. Changes are recorded in a line-based format::
2446 #
2454 #
2447 # <action> <hex-node> <tag-name>\n
2455 # <action> <hex-node> <tag-name>\n
2448 #
2456 #
2449 # Actions are defined as follows:
2457 # Actions are defined as follows:
2450 # "-R": tag is removed,
2458 # "-R": tag is removed,
2451 # "+A": tag is added,
2459 # "+A": tag is added,
2452 # "-M": tag is moved (old value),
2460 # "-M": tag is moved (old value),
2453 # "+M": tag is moved (new value),
2461 # "+M": tag is moved (new value),
2454 tracktags = lambda x: None
2462 tracktags = lambda x: None
2455 # experimental config: experimental.hook-track-tags
2463 # experimental config: experimental.hook-track-tags
2456 shouldtracktags = self.ui.configbool(
2464 shouldtracktags = self.ui.configbool(
2457 b'experimental', b'hook-track-tags'
2465 b'experimental', b'hook-track-tags'
2458 )
2466 )
2459 if desc != b'strip' and shouldtracktags:
2467 if desc != b'strip' and shouldtracktags:
2460 oldheads = self.changelog.headrevs()
2468 oldheads = self.changelog.headrevs()
2461
2469
2462 def tracktags(tr2):
2470 def tracktags(tr2):
2463 repo = reporef()
2471 repo = reporef()
2464 assert repo is not None # help pytype
2472 assert repo is not None # help pytype
2465 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2473 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2466 newheads = repo.changelog.headrevs()
2474 newheads = repo.changelog.headrevs()
2467 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2475 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2468 # note: we compare lists here.
2476 # note: we compare lists here.
2469 # As we do it only once, building a set would not be cheaper.
2477 # As we do it only once, building a set would not be cheaper.
2470 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2478 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2471 if changes:
2479 if changes:
2472 tr2.hookargs[b'tag_moved'] = b'1'
2480 tr2.hookargs[b'tag_moved'] = b'1'
2473 with repo.vfs(
2481 with repo.vfs(
2474 b'changes/tags.changes', b'w', atomictemp=True
2482 b'changes/tags.changes', b'w', atomictemp=True
2475 ) as changesfile:
2483 ) as changesfile:
2476 # note: we do not register the file with the transaction
2484 # note: we do not register the file with the transaction
2477 # because we need it to still exist when the transaction
2485 # because we need it to still exist when the transaction
2478 # is closed (for txnclose hooks)
2486 # is closed (for txnclose hooks)
2479 tagsmod.writediff(changesfile, changes)
2487 tagsmod.writediff(changesfile, changes)
2480
2488
2481 def validate(tr2):
2489 def validate(tr2):
2482 """will run pre-closing hooks"""
2490 """will run pre-closing hooks"""
2483 # XXX the transaction API is a bit lacking here so we take a hacky
2491 # XXX the transaction API is a bit lacking here so we take a hacky
2484 # path for now
2492 # path for now
2485 #
2493 #
2486 # We cannot add this as a "pending" hook since the 'tr.hookargs'
2494 # We cannot add this as a "pending" hook since the 'tr.hookargs'
2487 # dict is copied before these run. In addition we need the data
2495 # dict is copied before these run. In addition we need the data
2488 # available to in-memory hooks too.
2496 # available to in-memory hooks too.
2489 #
2497 #
2490 # Moreover, we also need to make sure this runs before txnclose
2498 # Moreover, we also need to make sure this runs before txnclose
2491 # hooks and there is no "pending" mechanism that would execute
2499 # hooks and there is no "pending" mechanism that would execute
2492 # logic only if hooks are about to run.
2500 # logic only if hooks are about to run.
2493 #
2501 #
2494 # Fixing this limitation of the transaction is also needed to track
2502 # Fixing this limitation of the transaction is also needed to track
2495 # other families of changes (bookmarks, phases, obsolescence).
2503 # other families of changes (bookmarks, phases, obsolescence).
2496 #
2504 #
2497 # This will have to be fixed before we remove the experimental
2505 # This will have to be fixed before we remove the experimental
2498 # gating.
2506 # gating.
2499 tracktags(tr2)
2507 tracktags(tr2)
2500 repo = reporef()
2508 repo = reporef()
2501 assert repo is not None # help pytype
2509 assert repo is not None # help pytype
2502
2510
2503 singleheadopt = (b'experimental', b'single-head-per-branch')
2511 singleheadopt = (b'experimental', b'single-head-per-branch')
2504 singlehead = repo.ui.configbool(*singleheadopt)
2512 singlehead = repo.ui.configbool(*singleheadopt)
2505 if singlehead:
2513 if singlehead:
2506 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2514 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2507 accountclosed = singleheadsub.get(
2515 accountclosed = singleheadsub.get(
2508 b"account-closed-heads", False
2516 b"account-closed-heads", False
2509 )
2517 )
2510 if singleheadsub.get(b"public-changes-only", False):
2518 if singleheadsub.get(b"public-changes-only", False):
2511 filtername = b"immutable"
2519 filtername = b"immutable"
2512 else:
2520 else:
2513 filtername = b"visible"
2521 filtername = b"visible"
2514 scmutil.enforcesinglehead(
2522 scmutil.enforcesinglehead(
2515 repo, tr2, desc, accountclosed, filtername
2523 repo, tr2, desc, accountclosed, filtername
2516 )
2524 )
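# The configuration driving the block above might look like this in an
# hgrc (suboption syntax; the values are illustrative):
#
#     [experimental]
#     single-head-per-branch = yes
#     single-head-per-branch:account-closed-heads = yes
#     single-head-per-branch:public-changes-only = yes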
2517 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2525 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2518 for name, (old, new) in sorted(
2526 for name, (old, new) in sorted(
2519 tr.changes[b'bookmarks'].items()
2527 tr.changes[b'bookmarks'].items()
2520 ):
2528 ):
2521 args = tr.hookargs.copy()
2529 args = tr.hookargs.copy()
2522 args.update(bookmarks.preparehookargs(name, old, new))
2530 args.update(bookmarks.preparehookargs(name, old, new))
2523 repo.hook(
2531 repo.hook(
2524 b'pretxnclose-bookmark',
2532 b'pretxnclose-bookmark',
2525 throw=True,
2533 throw=True,
2526 **pycompat.strkwargs(args)
2534 **pycompat.strkwargs(args)
2527 )
2535 )
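# A repository could react to this hook, e.g. to forbid bookmark
# deletion (illustrative hgrc; this assumes the usual convention that
# HG_NODE is empty when a bookmark is removed):
#
#     [hooks]
#     pretxnclose-bookmark.no-delete = test -n "$HG_NODE"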
2528 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2536 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2529 cl = repo.unfiltered().changelog
2537 cl = repo.unfiltered().changelog
2530 for revs, (old, new) in tr.changes[b'phases']:
2538 for revs, (old, new) in tr.changes[b'phases']:
2531 for rev in revs:
2539 for rev in revs:
2532 args = tr.hookargs.copy()
2540 args = tr.hookargs.copy()
2533 node = hex(cl.node(rev))
2541 node = hex(cl.node(rev))
2534 args.update(phases.preparehookargs(node, old, new))
2542 args.update(phases.preparehookargs(node, old, new))
2535 repo.hook(
2543 repo.hook(
2536 b'pretxnclose-phase',
2544 b'pretxnclose-phase',
2537 throw=True,
2545 throw=True,
2538 **pycompat.strkwargs(args)
2546 **pycompat.strkwargs(args)
2539 )
2547 )
2540
2548
2541 repo.hook(
2549 repo.hook(
2542 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2550 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2543 )
2551 )
2544
2552
2545 def releasefn(tr, success):
2553 def releasefn(tr, success):
2546 repo = reporef()
2554 repo = reporef()
2547 if repo is None:
2555 if repo is None:
2548 # If the repo has been GC'd (and this release function is being
2556 # If the repo has been GC'd (and this release function is being
2549 # called from transaction.__del__), there's not much we can do,
2557 # called from transaction.__del__), there's not much we can do,
2550 # so just leave the unfinished transaction there and let the
2558 # so just leave the unfinished transaction there and let the
2551 # user run `hg recover`.
2559 # user run `hg recover`.
2552 return
2560 return
2553 if success:
2561 if success:
2554 # this should be explicitly invoked here, because
2562 # this should be explicitly invoked here, because
2555 # in-memory changes aren't written out when closing
2563 # in-memory changes aren't written out when closing
2556 # the transaction, if tr.addfilegenerator (via
2564 # the transaction, if tr.addfilegenerator (via
2557 # dirstate.write or so) isn't invoked while the
2565 # dirstate.write or so) isn't invoked while the
2558 # transaction is running
2566 # transaction is running
2559 repo.dirstate.write(None)
2567 repo.dirstate.write(None)
2560 else:
2568 else:
2561 # discard all changes (including ones already written
2569 # discard all changes (including ones already written
2562 # out) in this transaction
2570 # out) in this transaction
2563 repo.invalidate(clearfilecache=True)
2571 repo.invalidate(clearfilecache=True)
2564
2572
2565 tr = transaction.transaction(
2573 tr = transaction.transaction(
2566 rp,
2574 rp,
2567 self.svfs,
2575 self.svfs,
2568 vfsmap,
2576 vfsmap,
2569 b"journal",
2577 b"journal",
2570 b"undo",
2578 b"undo",
2571 aftertrans(renames),
2579 aftertrans(renames),
2572 self.store.createmode,
2580 self.store.createmode,
2573 validator=validate,
2581 validator=validate,
2574 releasefn=releasefn,
2582 releasefn=releasefn,
2575 checkambigfiles=_cachedfiles,
2583 checkambigfiles=_cachedfiles,
2576 name=desc,
2584 name=desc,
2577 )
2585 )
2578 tr.changes[b'origrepolen'] = len(self)
2586 tr.changes[b'origrepolen'] = len(self)
2579 tr.changes[b'obsmarkers'] = set()
2587 tr.changes[b'obsmarkers'] = set()
2580 tr.changes[b'phases'] = []
2588 tr.changes[b'phases'] = []
2581 tr.changes[b'bookmarks'] = {}
2589 tr.changes[b'bookmarks'] = {}
2582
2590
2583 tr.hookargs[b'txnid'] = txnid
2591 tr.hookargs[b'txnid'] = txnid
2584 tr.hookargs[b'txnname'] = desc
2592 tr.hookargs[b'txnname'] = desc
2585 tr.hookargs[b'changes'] = tr.changes
2593 tr.hookargs[b'changes'] = tr.changes
2586 # note: writing the fncache only during finalize means that the file is
2594 # note: writing the fncache only during finalize means that the file is
2587 # outdated when running hooks. As fncache is used for streaming clone,
2595 # outdated when running hooks. As fncache is used for streaming clone,
2588 # this is not expected to break anything that happens during the hooks.
2596 # this is not expected to break anything that happens during the hooks.
2589 tr.addfinalize(b'flush-fncache', self.store.write)
2597 tr.addfinalize(b'flush-fncache', self.store.write)
2590
2598
2591 def txnclosehook(tr2):
2599 def txnclosehook(tr2):
2592 """To be run if transaction is successful, will schedule a hook run"""
2600 """To be run if transaction is successful, will schedule a hook run"""
2593 # Don't reference tr2 in hook() so we don't hold a reference.
2601 # Don't reference tr2 in hook() so we don't hold a reference.
2594 # This reduces memory consumption when there are multiple
2602 # This reduces memory consumption when there are multiple
2595 # transactions per lock. This can likely go away if issue5045
2603 # transactions per lock. This can likely go away if issue5045
2596 # fixes the function accumulation.
2604 # fixes the function accumulation.
2597 hookargs = tr2.hookargs
2605 hookargs = tr2.hookargs
2598
2606
2599 def hookfunc(unused_success):
2607 def hookfunc(unused_success):
2600 repo = reporef()
2608 repo = reporef()
2601 assert repo is not None # help pytype
2609 assert repo is not None # help pytype
2602
2610
2603 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2611 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2604 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2612 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2605 for name, (old, new) in bmchanges:
2613 for name, (old, new) in bmchanges:
2606 args = tr.hookargs.copy()
2614 args = tr.hookargs.copy()
2607 args.update(bookmarks.preparehookargs(name, old, new))
2615 args.update(bookmarks.preparehookargs(name, old, new))
2608 repo.hook(
2616 repo.hook(
2609 b'txnclose-bookmark',
2617 b'txnclose-bookmark',
2610 throw=False,
2618 throw=False,
2611 **pycompat.strkwargs(args)
2619 **pycompat.strkwargs(args)
2612 )
2620 )
2613
2621
2614 if hook.hashook(repo.ui, b'txnclose-phase'):
2622 if hook.hashook(repo.ui, b'txnclose-phase'):
2615 cl = repo.unfiltered().changelog
2623 cl = repo.unfiltered().changelog
2616 phasemv = sorted(
2624 phasemv = sorted(
2617 tr.changes[b'phases'], key=lambda r: r[0][0]
2625 tr.changes[b'phases'], key=lambda r: r[0][0]
2618 )
2626 )
2619 for revs, (old, new) in phasemv:
2627 for revs, (old, new) in phasemv:
2620 for rev in revs:
2628 for rev in revs:
2621 args = tr.hookargs.copy()
2629 args = tr.hookargs.copy()
2622 node = hex(cl.node(rev))
2630 node = hex(cl.node(rev))
2623 args.update(phases.preparehookargs(node, old, new))
2631 args.update(phases.preparehookargs(node, old, new))
2624 repo.hook(
2632 repo.hook(
2625 b'txnclose-phase',
2633 b'txnclose-phase',
2626 throw=False,
2634 throw=False,
2627 **pycompat.strkwargs(args)
2635 **pycompat.strkwargs(args)
2628 )
2636 )
2629
2637
2630 repo.hook(
2638 repo.hook(
2631 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2639 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2632 )
2640 )
2633
2641
2634 repo = reporef()
2642 repo = reporef()
2635 assert repo is not None # help pytype
2643 assert repo is not None # help pytype
2636 repo._afterlock(hookfunc)
2644 repo._afterlock(hookfunc)
2637
2645
2638 tr.addfinalize(b'txnclose-hook', txnclosehook)
2646 tr.addfinalize(b'txnclose-hook', txnclosehook)
2639 # Include a leading "-" to make it run before the transaction summary
2647 # Include a leading "-" to make it run before the transaction summary
2640 # reports registered via scmutil.registersummarycallback() whose names
2648 # reports registered via scmutil.registersummarycallback() whose names
2641 # are 00-txnreport etc. That way, the caches will be warm when the
2649 # are 00-txnreport etc. That way, the caches will be warm when the
2642 # callbacks run.
2650 # callbacks run.
2643 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2651 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2644
2652
2645 def txnaborthook(tr2):
2653 def txnaborthook(tr2):
2646 """To be run if transaction is aborted"""
2654 """To be run if transaction is aborted"""
2647 repo = reporef()
2655 repo = reporef()
2648 assert repo is not None # help pytype
2656 assert repo is not None # help pytype
2649 repo.hook(
2657 repo.hook(
2650 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2658 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2651 )
2659 )
2652
2660
2653 tr.addabort(b'txnabort-hook', txnaborthook)
2661 tr.addabort(b'txnabort-hook', txnaborthook)
2654 # avoid eager cache invalidation. in-memory data should be identical
2662 # avoid eager cache invalidation. in-memory data should be identical
2655 # to stored data if the transaction has no error.
2663 # to stored data if the transaction has no error.
2656 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2664 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2657 self._transref = weakref.ref(tr)
2665 self._transref = weakref.ref(tr)
2658 scmutil.registersummarycallback(self, tr, desc)
2666 scmutil.registersummarycallback(self, tr, desc)
2659 # This only exists to deal with rollback's need to have viable
2667 # This only exists to deal with rollback's need to have viable
2660 # parents at the end of the operation. So back up viable parents at the
2668 # parents at the end of the operation. So back up viable parents at the
2661 # time of this operation.
2669 # time of this operation.
2662 #
2670 #
2663 # We only do it when the `wlock` is taken, otherwise others might be
2671 # We only do it when the `wlock` is taken, otherwise others might be
2664 # altering the dirstate under us.
2672 # altering the dirstate under us.
2665 #
2673 #
2666 # This is really not a great way to do this (first, because we cannot
2674 # This is really not a great way to do this (first, because we cannot
2667 # always do it). More viable alternatives exist:
2675 # always do it). More viable alternatives exist:
2668 #
2676 #
2669 # - backing up only the working copy parents in dedicated files and doing
2677 # - backing up only the working copy parents in dedicated files and doing
2670 # a clean "keep-update" to them on `hg rollback`.
2678 # a clean "keep-update" to them on `hg rollback`.
2671 #
2679 #
2672 # - slightly changing the behavior and applying logic similar to "hg
2680 # - slightly changing the behavior and applying logic similar to "hg
2673 # strip" to pick a working copy destination on `hg rollback`
2681 # strip" to pick a working copy destination on `hg rollback`
2674 if self.currentwlock() is not None:
2682 if self.currentwlock() is not None:
2675 ds = self.dirstate
2683 ds = self.dirstate
2676 if not self.vfs.exists(b'branch'):
2684 if not self.vfs.exists(b'branch'):
2677 # force a file to be written if none exists
2685 # force a file to be written if none exists
2678 ds.setbranch(b'default', None)
2686 ds.setbranch(b'default', None)
2679
2687
2680 def backup_dirstate(tr):
2688 def backup_dirstate(tr):
2681 for f in ds.all_file_names():
2689 for f in ds.all_file_names():
2682 # hardlink backup is okay because `dirstate` is always
2690 # hardlink backup is okay because `dirstate` is always
2683 # atomically written and possible data files are append-only
2691 # atomically written and possible data files are append-only
2684 # and resistant to trailing data.
2692 # and resistant to trailing data.
2685 tr.addbackup(f, hardlink=True, location=b'plain')
2693 tr.addbackup(f, hardlink=True, location=b'plain')
2686
2694
2687 tr.addvalidator(b'dirstate-backup', backup_dirstate)
2695 tr.addvalidator(b'dirstate-backup', backup_dirstate)
2688 return tr
2696 return tr
2689
2697
2690 def _journalfiles(self):
2698 def _journalfiles(self):
2691 return (
2699 return (
2692 (self.svfs, b'journal'),
2700 (self.svfs, b'journal'),
2693 (self.vfs, b'journal.desc'),
2701 (self.vfs, b'journal.desc'),
2694 )
2702 )
2695
2703
2696 def undofiles(self):
2704 def undofiles(self):
2697 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2705 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
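# undoname() maps a journal file to its undo counterpart, so the pairs
# returned by _journalfiles() above roughly become:
#
#     (svfs, b'journal')      -> (svfs, b'undo')
#     (vfs,  b'journal.desc') -> (vfs,  b'undo.desc')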
2698
2706
2699 @unfilteredmethod
2707 @unfilteredmethod
2700 def _writejournal(self, desc):
2708 def _writejournal(self, desc):
2701 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2709 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
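# For example, a commit in a repository that currently has 42
# changesets leaves a "journal.desc" containing (illustrative values):
#
#     42
#     commit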
2702
2710
2703 def recover(self):
2711 def recover(self):
2704 with self.lock():
2712 with self.lock():
2705 if self.svfs.exists(b"journal"):
2713 if self.svfs.exists(b"journal"):
2706 self.ui.status(_(b"rolling back interrupted transaction\n"))
2714 self.ui.status(_(b"rolling back interrupted transaction\n"))
2707 vfsmap = {
2715 vfsmap = self.vfs_map
2708 b'': self.svfs,
2709 b'plain': self.vfs,
2710 }
2711 transaction.rollback(
2716 transaction.rollback(
2712 self.svfs,
2717 self.svfs,
2713 vfsmap,
2718 vfsmap,
2714 b"journal",
2719 b"journal",
2715 self.ui.warn,
2720 self.ui.warn,
2716 checkambigfiles=_cachedfiles,
2721 checkambigfiles=_cachedfiles,
2717 )
2722 )
2718 self.invalidate()
2723 self.invalidate()
2719 return True
2724 return True
2720 else:
2725 else:
2721 self.ui.warn(_(b"no interrupted transaction available\n"))
2726 self.ui.warn(_(b"no interrupted transaction available\n"))
2722 return False
2727 return False
2723
2728
2724 def rollback(self, dryrun=False, force=False):
2729 def rollback(self, dryrun=False, force=False):
2725 wlock = lock = None
2730 wlock = lock = None
2726 try:
2731 try:
2727 wlock = self.wlock()
2732 wlock = self.wlock()
2728 lock = self.lock()
2733 lock = self.lock()
2729 if self.svfs.exists(b"undo"):
2734 if self.svfs.exists(b"undo"):
2730 return self._rollback(dryrun, force)
2735 return self._rollback(dryrun, force)
2731 else:
2736 else:
2732 self.ui.warn(_(b"no rollback information available\n"))
2737 self.ui.warn(_(b"no rollback information available\n"))
2733 return 1
2738 return 1
2734 finally:
2739 finally:
2735 release(lock, wlock)
2740 release(lock, wlock)
2736
2741
2737 @unfilteredmethod # Until we get smarter cache management
2742 @unfilteredmethod # Until we get smarter cache management
2738 def _rollback(self, dryrun, force):
2743 def _rollback(self, dryrun, force):
2739 ui = self.ui
2744 ui = self.ui
2740
2745
2741 parents = self.dirstate.parents()
2746 parents = self.dirstate.parents()
2742 try:
2747 try:
2743 args = self.vfs.read(b'undo.desc').splitlines()
2748 args = self.vfs.read(b'undo.desc').splitlines()
2744 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2749 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2745 if len(args) >= 3:
2750 if len(args) >= 3:
2746 detail = args[2]
2751 detail = args[2]
2747 oldtip = oldlen - 1
2752 oldtip = oldlen - 1
2748
2753
2749 if detail and ui.verbose:
2754 if detail and ui.verbose:
2750 msg = _(
2755 msg = _(
2751 b'repository tip rolled back to revision %d'
2756 b'repository tip rolled back to revision %d'
2752 b' (undo %s: %s)\n'
2757 b' (undo %s: %s)\n'
2753 ) % (oldtip, desc, detail)
2758 ) % (oldtip, desc, detail)
2754 else:
2759 else:
2755 msg = _(
2760 msg = _(
2756 b'repository tip rolled back to revision %d (undo %s)\n'
2761 b'repository tip rolled back to revision %d (undo %s)\n'
2757 ) % (oldtip, desc)
2762 ) % (oldtip, desc)
2758 parentgone = any(self[p].rev() > oldtip for p in parents)
2763 parentgone = any(self[p].rev() > oldtip for p in parents)
2759 except IOError:
2764 except IOError:
2760 msg = _(b'rolling back unknown transaction\n')
2765 msg = _(b'rolling back unknown transaction\n')
2761 desc = None
2766 desc = None
2762 parentgone = True
2767 parentgone = True
2763
2768
2764 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2769 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2765 raise error.Abort(
2770 raise error.Abort(
2766 _(
2771 _(
2767 b'rollback of last commit while not checked out '
2772 b'rollback of last commit while not checked out '
2768 b'may lose data'
2773 b'may lose data'
2769 ),
2774 ),
2770 hint=_(b'use -f to force'),
2775 hint=_(b'use -f to force'),
2771 )
2776 )
2772
2777
2773 ui.status(msg)
2778 ui.status(msg)
2774 if dryrun:
2779 if dryrun:
2775 return 0
2780 return 0
2776
2781
2777 self.destroying()
2782 self.destroying()
2778 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2783 vfsmap = self.vfs_map
2779 skip_journal_pattern = None
2784 skip_journal_pattern = None
2780 if not parentgone:
2785 if not parentgone:
2781 skip_journal_pattern = RE_SKIP_DIRSTATE_ROLLBACK
2786 skip_journal_pattern = RE_SKIP_DIRSTATE_ROLLBACK
2782 transaction.rollback(
2787 transaction.rollback(
2783 self.svfs,
2788 self.svfs,
2784 vfsmap,
2789 vfsmap,
2785 b'undo',
2790 b'undo',
2786 ui.warn,
2791 ui.warn,
2787 checkambigfiles=_cachedfiles,
2792 checkambigfiles=_cachedfiles,
2788 skip_journal_pattern=skip_journal_pattern,
2793 skip_journal_pattern=skip_journal_pattern,
2789 )
2794 )
2790 self.invalidate()
2795 self.invalidate()
2791 self.dirstate.invalidate()
2796 self.dirstate.invalidate()
2792
2797
2793 if parentgone:
2798 if parentgone:
2794 # replace this with some explicit parent update in the future.
2799 # replace this with some explicit parent update in the future.
2795 has_node = self.changelog.index.has_node
2800 has_node = self.changelog.index.has_node
2796 if not all(has_node(p) for p in self.dirstate._pl):
2801 if not all(has_node(p) for p in self.dirstate._pl):
2797 # There was no dirstate to backup initially, we need to drop
2802 # There was no dirstate to backup initially, we need to drop
2798 # the existing one.
2803 # the existing one.
2799 with self.dirstate.changing_parents(self):
2804 with self.dirstate.changing_parents(self):
2800 self.dirstate.setparents(self.nullid)
2805 self.dirstate.setparents(self.nullid)
2801 self.dirstate.clear()
2806 self.dirstate.clear()
2802
2807
2803 parents = tuple([p.rev() for p in self[None].parents()])
2808 parents = tuple([p.rev() for p in self[None].parents()])
2804 if len(parents) > 1:
2809 if len(parents) > 1:
2805 ui.status(
2810 ui.status(
2806 _(
2811 _(
2807 b'working directory now based on '
2812 b'working directory now based on '
2808 b'revisions %d and %d\n'
2813 b'revisions %d and %d\n'
2809 )
2814 )
2810 % parents
2815 % parents
2811 )
2816 )
2812 else:
2817 else:
2813 ui.status(
2818 ui.status(
2814 _(b'working directory now based on revision %d\n') % parents
2819 _(b'working directory now based on revision %d\n') % parents
2815 )
2820 )
2816 mergestatemod.mergestate.clean(self)
2821 mergestatemod.mergestate.clean(self)
2817
2822
2818 # TODO: if we know which new heads may result from this rollback, pass
2823 # TODO: if we know which new heads may result from this rollback, pass
2819 # them to destroy(), which will prevent the branchhead cache from being
2824 # them to destroy(), which will prevent the branchhead cache from being
2820 # invalidated.
2825 # invalidated.
2821 self.destroyed()
2826 self.destroyed()
2822 return 0
2827 return 0
2823
2828
2824 def _buildcacheupdater(self, newtransaction):
2829 def _buildcacheupdater(self, newtransaction):
2825 """called during transaction to build the callback updating cache
2830 """called during transaction to build the callback updating cache
2826
2831
2827 Lives on the repository to help extension who might want to augment
2832 Lives on the repository to help extension who might want to augment
2828 this logic. For this purpose, the created transaction is passed to the
2833 this logic. For this purpose, the created transaction is passed to the
2829 method.
2834 method.
2830 """
2835 """
2831 # we must avoid cyclic reference between repo and transaction.
2836 # we must avoid cyclic reference between repo and transaction.
2832 reporef = weakref.ref(self)
2837 reporef = weakref.ref(self)
2833
2838
2834 def updater(tr):
2839 def updater(tr):
2835 repo = reporef()
2840 repo = reporef()
2836 assert repo is not None # help pytype
2841 assert repo is not None # help pytype
2837 repo.updatecaches(tr)
2842 repo.updatecaches(tr)
2838
2843
2839 return updater
2844 return updater
2840
2845
2841 @unfilteredmethod
2846 @unfilteredmethod
2842 def updatecaches(self, tr=None, full=False, caches=None):
2847 def updatecaches(self, tr=None, full=False, caches=None):
2843 """warm appropriate caches
2848 """warm appropriate caches
2844
2849
2845 If this function is called after a transaction closed. The transaction
2850 If this function is called after a transaction closed. The transaction
2846 will be available in the 'tr' argument. This can be used to selectively
2851 will be available in the 'tr' argument. This can be used to selectively
2847 update caches relevant to the changes in that transaction.
2852 update caches relevant to the changes in that transaction.
2848
2853
2849 If 'full' is set, make sure all caches the function knows about have
2854 If 'full' is set, make sure all caches the function knows about have
2850 up-to-date data. Even the ones usually loaded more lazily.
2855 up-to-date data. Even the ones usually loaded more lazily.
2851
2856
2852 The `full` argument can take a special "post-clone" value. In this case
2857 The `full` argument can take a special "post-clone" value. In this case
2853 the cache warming is done after a clone, and some of the slower caches might
2858 the cache warming is done after a clone, and some of the slower caches might
2854 be skipped, namely the `.fnodetags` one. This argument is 5.8 specific
2859 be skipped, namely the `.fnodetags` one. This argument is 5.8 specific
2855 as we plan for a cleaner way to deal with this for 5.9.
2860 as we plan for a cleaner way to deal with this for 5.9.
2856 """
2861 """
2857 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2862 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2858 # During strip, many caches are invalid but
2863 # During strip, many caches are invalid but
2859 # later call to `destroyed` will refresh them.
2864 # later call to `destroyed` will refresh them.
2860 return
2865 return
2861
2866
2862 unfi = self.unfiltered()
2867 unfi = self.unfiltered()
2863
2868
2864 if full:
2869 if full:
2865 msg = (
2870 msg = (
2866 "`full` argument for `repo.updatecaches` is deprecated\n"
2871 "`full` argument for `repo.updatecaches` is deprecated\n"
2867 "(use `caches=repository.CACHE_ALL` instead)"
2872 "(use `caches=repository.CACHE_ALL` instead)"
2868 )
2873 )
2869 self.ui.deprecwarn(msg, b"5.9")
2874 self.ui.deprecwarn(msg, b"5.9")
2870 caches = repository.CACHES_ALL
2875 caches = repository.CACHES_ALL
2871 if full == b"post-clone":
2876 if full == b"post-clone":
2872 caches = repository.CACHES_POST_CLONE
2877 caches = repository.CACHES_POST_CLONE
2873 caches = repository.CACHES_ALL
2878 caches = repository.CACHES_ALL
2874 elif caches is None:
2879 elif caches is None:
2875 caches = repository.CACHES_DEFAULT
2880 caches = repository.CACHES_DEFAULT
2876
2881
2877 if repository.CACHE_BRANCHMAP_SERVED in caches:
2882 if repository.CACHE_BRANCHMAP_SERVED in caches:
2878 if tr is None or tr.changes[b'origrepolen'] < len(self):
2883 if tr is None or tr.changes[b'origrepolen'] < len(self):
2879 # accessing the 'served' branchmap should refresh all the others,
2884 # accessing the 'served' branchmap should refresh all the others,
2880 self.ui.debug(b'updating the branch cache\n')
2885 self.ui.debug(b'updating the branch cache\n')
2881 self.filtered(b'served').branchmap()
2886 self.filtered(b'served').branchmap()
2882 self.filtered(b'served.hidden').branchmap()
2887 self.filtered(b'served.hidden').branchmap()
2883 # flush all possibly delayed write.
2888 # flush all possibly delayed write.
2884 self._branchcaches.write_delayed(self)
2889 self._branchcaches.write_delayed(self)
2885
2890
2886 if repository.CACHE_CHANGELOG_CACHE in caches:
2891 if repository.CACHE_CHANGELOG_CACHE in caches:
2887 self.changelog.update_caches(transaction=tr)
2892 self.changelog.update_caches(transaction=tr)
2888
2893
2889 if repository.CACHE_MANIFESTLOG_CACHE in caches:
2894 if repository.CACHE_MANIFESTLOG_CACHE in caches:
2890 self.manifestlog.update_caches(transaction=tr)
2895 self.manifestlog.update_caches(transaction=tr)
2891
2896
2892 if repository.CACHE_REV_BRANCH in caches:
2897 if repository.CACHE_REV_BRANCH in caches:
2893 rbc = unfi.revbranchcache()
2898 rbc = unfi.revbranchcache()
2894 for r in unfi.changelog:
2899 for r in unfi.changelog:
2895 rbc.branchinfo(r)
2900 rbc.branchinfo(r)
2896 rbc.write()
2901 rbc.write()
2897
2902
2898 if repository.CACHE_FULL_MANIFEST in caches:
2903 if repository.CACHE_FULL_MANIFEST in caches:
2899 # ensure the working copy parents are in the manifestfulltextcache
2904 # ensure the working copy parents are in the manifestfulltextcache
2900 for ctx in self[b'.'].parents():
2905 for ctx in self[b'.'].parents():
2901 ctx.manifest() # accessing the manifest is enough
2906 ctx.manifest() # accessing the manifest is enough
2902
2907
2903 if repository.CACHE_FILE_NODE_TAGS in caches:
2908 if repository.CACHE_FILE_NODE_TAGS in caches:
2904 # accessing fnode cache warms the cache
2909 # accessing fnode cache warms the cache
2905 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2910 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2906
2911
2907 if repository.CACHE_TAGS_DEFAULT in caches:
2912 if repository.CACHE_TAGS_DEFAULT in caches:
2908 # accessing tags warms the cache
2913 # accessing tags warms the cache
2909 self.tags()
2914 self.tags()
2910 if repository.CACHE_TAGS_SERVED in caches:
2915 if repository.CACHE_TAGS_SERVED in caches:
2911 self.filtered(b'served').tags()
2916 self.filtered(b'served').tags()
2912
2917
2913 if repository.CACHE_BRANCHMAP_ALL in caches:
2918 if repository.CACHE_BRANCHMAP_ALL in caches:
2914 # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
2919 # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
2915 # so we're forcing a write to cause these caches to be warmed up
2920 # so we're forcing a write to cause these caches to be warmed up
2916 # even if they haven't explicitly been requested yet (if they've
2921 # even if they haven't explicitly been requested yet (if they've
2917 # never been used by hg, they won't ever have been written, even if
2922 # never been used by hg, they won't ever have been written, even if
2918 # they're a subset of another kind of cache that *has* been used).
2923 # they're a subset of another kind of cache that *has* been used).
2919 for filt in repoview.filtertable.keys():
2924 for filt in repoview.filtertable.keys():
2920 filtered = self.filtered(filt)
2925 filtered = self.filtered(filt)
2921 filtered.branchmap().write(filtered)
2926 filtered.branchmap().write(filtered)
2922
2927
2923 def invalidatecaches(self):
2928 def invalidatecaches(self):
2924 if '_tagscache' in vars(self):
2929 if '_tagscache' in vars(self):
2925 # can't use delattr on proxy
2930 # can't use delattr on proxy
2926 del self.__dict__['_tagscache']
2931 del self.__dict__['_tagscache']
2927
2932
2928 self._branchcaches.clear()
2933 self._branchcaches.clear()
2929 self.invalidatevolatilesets()
2934 self.invalidatevolatilesets()
2930 self._sparsesignaturecache.clear()
2935 self._sparsesignaturecache.clear()
2931
2936
2932 def invalidatevolatilesets(self):
2937 def invalidatevolatilesets(self):
2933 self.filteredrevcache.clear()
2938 self.filteredrevcache.clear()
2934 obsolete.clearobscaches(self)
2939 obsolete.clearobscaches(self)
2935 self._quick_access_changeid_invalidate()
2940 self._quick_access_changeid_invalidate()
2936
2941
2937 def invalidatedirstate(self):
2942 def invalidatedirstate(self):
2938 """Invalidates the dirstate, causing the next call to dirstate
2943 """Invalidates the dirstate, causing the next call to dirstate
2939 to check if it was modified since the last time it was read,
2944 to check if it was modified since the last time it was read,
2940 rereading it if it has.
2945 rereading it if it has.
2941
2946
2942 This is different from dirstate.invalidate() in that it doesn't always
2947 This is different from dirstate.invalidate() in that it doesn't always
2943 reread the dirstate. Use dirstate.invalidate() if you want to
2948 reread the dirstate. Use dirstate.invalidate() if you want to
2944 explicitly read the dirstate again (i.e. restoring it to a previous
2949 explicitly read the dirstate again (i.e. restoring it to a previous
2945 known good state)."""
2950 known good state)."""
2946 unfi = self.unfiltered()
2951 unfi = self.unfiltered()
2947 if 'dirstate' in unfi.__dict__:
2952 if 'dirstate' in unfi.__dict__:
2948 del unfi.__dict__['dirstate']
2953 del unfi.__dict__['dirstate']
2949
2954
2950 def invalidate(self, clearfilecache=False):
2955 def invalidate(self, clearfilecache=False):
2951 """Invalidates both store and non-store parts other than dirstate
2956 """Invalidates both store and non-store parts other than dirstate
2952
2957
2953 If a transaction is running, invalidation of store is omitted,
2958 If a transaction is running, invalidation of store is omitted,
2954 because discarding in-memory changes might cause inconsistency
2959 because discarding in-memory changes might cause inconsistency
2955 (e.g. an incomplete fncache causes unintentional failure, but
2960 (e.g. an incomplete fncache causes unintentional failure, but
2956 a redundant one doesn't).
2961 a redundant one doesn't).
2957 """
2962 """
2958 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2963 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2959 for k in list(self._filecache.keys()):
2964 for k in list(self._filecache.keys()):
2960 if (
2965 if (
2961 k == b'changelog'
2966 k == b'changelog'
2962 and self.currenttransaction()
2967 and self.currenttransaction()
2963 and self.changelog._delayed
2968 and self.changelog._delayed
2964 ):
2969 ):
2965 # The changelog object may store unwritten revisions. We don't
2970 # The changelog object may store unwritten revisions. We don't
2966 # want to lose them.
2971 # want to lose them.
2967 # TODO: Solve the problem instead of working around it.
2972 # TODO: Solve the problem instead of working around it.
2968 continue
2973 continue
2969
2974
2970 if clearfilecache:
2975 if clearfilecache:
2971 del self._filecache[k]
2976 del self._filecache[k]
2972 try:
2977 try:
2973 delattr(unfiltered, k)
2978 delattr(unfiltered, k)
2974 except AttributeError:
2979 except AttributeError:
2975 pass
2980 pass
2976 self.invalidatecaches()
2981 self.invalidatecaches()
2977 if not self.currenttransaction():
2982 if not self.currenttransaction():
2978 # TODO: Changing contents of store outside transaction
2983 # TODO: Changing contents of store outside transaction
2979 # causes inconsistency. We should make in-memory store
2984 # causes inconsistency. We should make in-memory store
2980 # changes detectable, and abort if changed.
2985 # changes detectable, and abort if changed.
2981 self.store.invalidatecaches()
2986 self.store.invalidatecaches()
2982
2987
2983 def invalidateall(self):
2988 def invalidateall(self):
2984 """Fully invalidates both store and non-store parts, causing the
2989 """Fully invalidates both store and non-store parts, causing the
2985 subsequent operation to reread any outside changes."""
2990 subsequent operation to reread any outside changes."""
2986 # extensions should hook this to invalidate their caches
2991 # extensions should hook this to invalidate their caches
2987 self.invalidate()
2992 self.invalidate()
2988 self.invalidatedirstate()
2993 self.invalidatedirstate()
2989
2994
2990 @unfilteredmethod
2995 @unfilteredmethod
2991 def _refreshfilecachestats(self, tr):
2996 def _refreshfilecachestats(self, tr):
2992 """Reload stats of cached files so that they are flagged as valid"""
2997 """Reload stats of cached files so that they are flagged as valid"""
2993 for k, ce in self._filecache.items():
2998 for k, ce in self._filecache.items():
2994 k = pycompat.sysstr(k)
2999 k = pycompat.sysstr(k)
2995 if k == 'dirstate' or k not in self.__dict__:
3000 if k == 'dirstate' or k not in self.__dict__:
2996 continue
3001 continue
2997 ce.refresh()
3002 ce.refresh()
2998
3003
2999 def _lock(
3004 def _lock(
3000 self,
3005 self,
3001 vfs,
3006 vfs,
3002 lockname,
3007 lockname,
3003 wait,
3008 wait,
3004 releasefn,
3009 releasefn,
3005 acquirefn,
3010 acquirefn,
3006 desc,
3011 desc,
3007 ):
3012 ):
3008 timeout = 0
3013 timeout = 0
3009 warntimeout = 0
3014 warntimeout = 0
3010 if wait:
3015 if wait:
3011 timeout = self.ui.configint(b"ui", b"timeout")
3016 timeout = self.ui.configint(b"ui", b"timeout")
3012 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
3017 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
3013 # internal config: ui.signal-safe-lock
3018 # internal config: ui.signal-safe-lock
3014 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
3019 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
3015
3020
3016 l = lockmod.trylock(
3021 l = lockmod.trylock(
3017 self.ui,
3022 self.ui,
3018 vfs,
3023 vfs,
3019 lockname,
3024 lockname,
3020 timeout,
3025 timeout,
3021 warntimeout,
3026 warntimeout,
3022 releasefn=releasefn,
3027 releasefn=releasefn,
3023 acquirefn=acquirefn,
3028 acquirefn=acquirefn,
3024 desc=desc,
3029 desc=desc,
3025 signalsafe=signalsafe,
3030 signalsafe=signalsafe,
3026 )
3031 )
3027 return l
3032 return l
3028
3033
3029 def _afterlock(self, callback):
3034 def _afterlock(self, callback):
3030 """add a callback to be run when the repository is fully unlocked
3035 """add a callback to be run when the repository is fully unlocked
3031
3036
3032 The callback will be executed when the outermost lock is released
3037 The callback will be executed when the outermost lock is released
3033 (with wlock being higher level than 'lock')."""
3038 (with wlock being higher level than 'lock')."""
3034 for ref in (self._wlockref, self._lockref):
3039 for ref in (self._wlockref, self._lockref):
3035 l = ref and ref()
3040 l = ref and ref()
3036 if l and l.held:
3041 if l and l.held:
3037 l.postrelease.append(callback)
3042 l.postrelease.append(callback)
3038 break
3043 break
3039 else: # no lock has been found.
3044 else: # no lock has been found.
3040 callback(True)
3045 callback(True)
3041
3046
3042 def lock(self, wait=True):
3047 def lock(self, wait=True):
3043 """Lock the repository store (.hg/store) and return a weak reference
3048 """Lock the repository store (.hg/store) and return a weak reference
3044 to the lock. Use this before modifying the store (e.g. committing or
3049 to the lock. Use this before modifying the store (e.g. committing or
3045 stripping). If you are opening a transaction, get a lock as well.
3050 stripping). If you are opening a transaction, get a lock as well.
3046
3051
3047 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3052 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3048 'wlock' first to avoid a dead-lock hazard."""
3053 'wlock' first to avoid a dead-lock hazard."""
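# Correct acquisition order when both locks are needed, as commit()
# below does:
#
#     with repo.wlock(), repo.lock():
#         ...  # mutate the store and the working copy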
3049 l = self._currentlock(self._lockref)
3054 l = self._currentlock(self._lockref)
3050 if l is not None:
3055 if l is not None:
3051 l.lock()
3056 l.lock()
3052 return l
3057 return l
3053
3058
3054 l = self._lock(
3059 l = self._lock(
3055 vfs=self.svfs,
3060 vfs=self.svfs,
3056 lockname=b"lock",
3061 lockname=b"lock",
3057 wait=wait,
3062 wait=wait,
3058 releasefn=None,
3063 releasefn=None,
3059 acquirefn=self.invalidate,
3064 acquirefn=self.invalidate,
3060 desc=_(b'repository %s') % self.origroot,
3065 desc=_(b'repository %s') % self.origroot,
3061 )
3066 )
3062 self._lockref = weakref.ref(l)
3067 self._lockref = weakref.ref(l)
3063 return l
3068 return l
3064
3069
3065 def wlock(self, wait=True):
3070 def wlock(self, wait=True):
3066 """Lock the non-store parts of the repository (everything under
3071 """Lock the non-store parts of the repository (everything under
3067 .hg except .hg/store) and return a weak reference to the lock.
3072 .hg except .hg/store) and return a weak reference to the lock.
3068
3073
3069 Use this before modifying files in .hg.
3074 Use this before modifying files in .hg.
3070
3075
3071 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3076 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3072 'wlock' first to avoid a dead-lock hazard."""
3077 'wlock' first to avoid a dead-lock hazard."""
3073 l = self._wlockref() if self._wlockref else None
3078 l = self._wlockref() if self._wlockref else None
3074 if l is not None and l.held:
3079 if l is not None and l.held:
3075 l.lock()
3080 l.lock()
3076 return l
3081 return l
3077
3082
3078 # We do not need to check for non-waiting lock acquisition. Such
3083 # We do not need to check for non-waiting lock acquisition. Such
3079 # acquisition would not cause dead-lock as it would just fail.
3084 # acquisition would not cause dead-lock as it would just fail.
3080 if wait and (
3085 if wait and (
3081 self.ui.configbool(b'devel', b'all-warnings')
3086 self.ui.configbool(b'devel', b'all-warnings')
3082 or self.ui.configbool(b'devel', b'check-locks')
3087 or self.ui.configbool(b'devel', b'check-locks')
3083 ):
3088 ):
3084 if self._currentlock(self._lockref) is not None:
3089 if self._currentlock(self._lockref) is not None:
3085 self.ui.develwarn(b'"wlock" acquired after "lock"')
3090 self.ui.develwarn(b'"wlock" acquired after "lock"')
3086
3091
3087 def unlock():
3092 def unlock():
3088 if self.dirstate.is_changing_any:
3093 if self.dirstate.is_changing_any:
3089 msg = b"wlock release in the middle of a changing parents"
3094 msg = b"wlock release in the middle of a changing parents"
3090 self.ui.develwarn(msg)
3095 self.ui.develwarn(msg)
3091 self.dirstate.invalidate()
3096 self.dirstate.invalidate()
3092 else:
3097 else:
3093 if self.dirstate._dirty:
3098 if self.dirstate._dirty:
3094 msg = b"dirty dirstate on wlock release"
3099 msg = b"dirty dirstate on wlock release"
3095 self.ui.develwarn(msg)
3100 self.ui.develwarn(msg)
3096 self.dirstate.write(None)
3101 self.dirstate.write(None)
3097
3102
3098 unfi = self.unfiltered()
3103 unfi = self.unfiltered()
3099 if 'dirstate' in unfi.__dict__:
3104 if 'dirstate' in unfi.__dict__:
3100 del unfi.__dict__['dirstate']
3105 del unfi.__dict__['dirstate']
3101
3106
3102 l = self._lock(
3107 l = self._lock(
3103 self.vfs,
3108 self.vfs,
3104 b"wlock",
3109 b"wlock",
3105 wait,
3110 wait,
3106 unlock,
3111 unlock,
3107 self.invalidatedirstate,
3112 self.invalidatedirstate,
3108 _(b'working directory of %s') % self.origroot,
3113 _(b'working directory of %s') % self.origroot,
3109 )
3114 )
3110 self._wlockref = weakref.ref(l)
3115 self._wlockref = weakref.ref(l)
3111 return l
3116 return l
3112
3117
3113 def _currentlock(self, lockref):
3118 def _currentlock(self, lockref):
3114 """Returns the lock if it's held, or None if it's not."""
3119 """Returns the lock if it's held, or None if it's not."""
3115 if lockref is None:
3120 if lockref is None:
3116 return None
3121 return None
3117 l = lockref()
3122 l = lockref()
3118 if l is None or not l.held:
3123 if l is None or not l.held:
3119 return None
3124 return None
3120 return l
3125 return l
3121
3126
3122 def currentwlock(self):
3127 def currentwlock(self):
3123 """Returns the wlock if it's held, or None if it's not."""
3128 """Returns the wlock if it's held, or None if it's not."""
3124 return self._currentlock(self._wlockref)
3129 return self._currentlock(self._wlockref)
3125
3130
3126 def checkcommitpatterns(self, wctx, match, status, fail):
3131 def checkcommitpatterns(self, wctx, match, status, fail):
3127 """check for commit arguments that aren't committable"""
3132 """check for commit arguments that aren't committable"""
3128 if match.isexact() or match.prefix():
3133 if match.isexact() or match.prefix():
3129 matched = set(status.modified + status.added + status.removed)
3134 matched = set(status.modified + status.added + status.removed)
3130
3135
3131 for f in match.files():
3136 for f in match.files():
3132 f = self.dirstate.normalize(f)
3137 f = self.dirstate.normalize(f)
3133 if f == b'.' or f in matched or f in wctx.substate:
3138 if f == b'.' or f in matched or f in wctx.substate:
3134 continue
3139 continue
3135 if f in status.deleted:
3140 if f in status.deleted:
3136 fail(f, _(b'file not found!'))
3141 fail(f, _(b'file not found!'))
3137 # Is it a directory that exists or used to exist?
3142 # Is it a directory that exists or used to exist?
3138 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
3143 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
3139 d = f + b'/'
3144 d = f + b'/'
3140 for mf in matched:
3145 for mf in matched:
3141 if mf.startswith(d):
3146 if mf.startswith(d):
3142 break
3147 break
3143 else:
3148 else:
3144 fail(f, _(b"no match under directory!"))
3149 fail(f, _(b"no match under directory!"))
3145 elif f not in self.dirstate:
3150 elif f not in self.dirstate:
3146 fail(f, _(b"file not tracked!"))
3151 fail(f, _(b"file not tracked!"))
3147
3152
3148 @unfilteredmethod
3153 @unfilteredmethod
3149 def commit(
3154 def commit(
3150 self,
3155 self,
3151 text=b"",
3156 text=b"",
3152 user=None,
3157 user=None,
3153 date=None,
3158 date=None,
3154 match=None,
3159 match=None,
3155 force=False,
3160 force=False,
3156 editor=None,
3161 editor=None,
3157 extra=None,
3162 extra=None,
3158 ):
3163 ):
3159 """Add a new revision to current repository.
3164 """Add a new revision to current repository.
3160
3165
3161 Revision information is gathered from the working directory;
3166 Revision information is gathered from the working directory;
3162 match can be used to filter the committed files. If editor is
3167 match can be used to filter the committed files. If editor is
3163 supplied, it is called to get a commit message.
3168 supplied, it is called to get a commit message.
3164 """
3169 """
3165 if extra is None:
3170 if extra is None:
3166 extra = {}
3171 extra = {}
3167
3172
3168 def fail(f, msg):
3173 def fail(f, msg):
3169 raise error.InputError(b'%s: %s' % (f, msg))
3174 raise error.InputError(b'%s: %s' % (f, msg))
3170
3175
3171 if not match:
3176 if not match:
3172 match = matchmod.always()
3177 match = matchmod.always()
3173
3178
3174 if not force:
3179 if not force:
3175 match.bad = fail
3180 match.bad = fail
3176
3181
3177 # lock() for recent changelog (see issue4368)
3182 # lock() for recent changelog (see issue4368)
3178 with self.wlock(), self.lock():
3183 with self.wlock(), self.lock():
3179 wctx = self[None]
3184 wctx = self[None]
3180 merge = len(wctx.parents()) > 1
3185 merge = len(wctx.parents()) > 1
3181
3186
3182 if not force and merge and not match.always():
3187 if not force and merge and not match.always():
3183 raise error.Abort(
3188 raise error.Abort(
3184 _(
3189 _(
3185 b'cannot partially commit a merge '
3190 b'cannot partially commit a merge '
3186 b'(do not specify files or patterns)'
3191 b'(do not specify files or patterns)'
3187 )
3192 )
3188 )
3193 )
3189
3194
3190 status = self.status(match=match, clean=force)
3195 status = self.status(match=match, clean=force)
3191 if force:
3196 if force:
3192 status.modified.extend(
3197 status.modified.extend(
3193 status.clean
3198 status.clean
3194 ) # mq may commit clean files
3199 ) # mq may commit clean files
3195
3200
3196 # check subrepos
3201 # check subrepos
3197 subs, commitsubs, newstate = subrepoutil.precommit(
3202 subs, commitsubs, newstate = subrepoutil.precommit(
3198 self.ui, wctx, status, match, force=force
3203 self.ui, wctx, status, match, force=force
3199 )
3204 )
3200
3205
3201 # make sure all explicit patterns are matched
3206 # make sure all explicit patterns are matched
3202 if not force:
3207 if not force:
3203 self.checkcommitpatterns(wctx, match, status, fail)
3208 self.checkcommitpatterns(wctx, match, status, fail)
3204
3209
3205 cctx = context.workingcommitctx(
3210 cctx = context.workingcommitctx(
3206 self, status, text, user, date, extra
3211 self, status, text, user, date, extra
3207 )
3212 )
3208
3213
3209 ms = mergestatemod.mergestate.read(self)
3214 ms = mergestatemod.mergestate.read(self)
3210 mergeutil.checkunresolved(ms)
3215 mergeutil.checkunresolved(ms)
3211
3216
3212 # internal config: ui.allowemptycommit
3217 # internal config: ui.allowemptycommit
3213 if cctx.isempty() and not self.ui.configbool(
3218 if cctx.isempty() and not self.ui.configbool(
3214 b'ui', b'allowemptycommit'
3219 b'ui', b'allowemptycommit'
3215 ):
3220 ):
3216 self.ui.debug(b'nothing to commit, clearing merge state\n')
3221 self.ui.debug(b'nothing to commit, clearing merge state\n')
3217 ms.reset()
3222 ms.reset()
3218 return None
3223 return None
3219
3224
3220 if merge and cctx.deleted():
3225 if merge and cctx.deleted():
3221 raise error.Abort(_(b"cannot commit merge with missing files"))
3226 raise error.Abort(_(b"cannot commit merge with missing files"))
3222
3227
3223 if editor:
3228 if editor:
3224 cctx._text = editor(self, cctx, subs)
3229 cctx._text = editor(self, cctx, subs)
3225 edited = text != cctx._text
3230 edited = text != cctx._text
3226
3231
3227 # Save commit message in case this transaction gets rolled back
3232 # Save commit message in case this transaction gets rolled back
3228 # (e.g. by a pretxncommit hook). Leave the content alone on
3233 # (e.g. by a pretxncommit hook). Leave the content alone on
3229 # the assumption that the user will use the same editor again.
3234 # the assumption that the user will use the same editor again.
3230 msg_path = self.savecommitmessage(cctx._text)
3235 msg_path = self.savecommitmessage(cctx._text)
3231
3236
3232 # commit subs and write new state
3237 # commit subs and write new state
3233 if subs:
3238 if subs:
3234 uipathfn = scmutil.getuipathfn(self)
3239 uipathfn = scmutil.getuipathfn(self)
3235 for s in sorted(commitsubs):
3240 for s in sorted(commitsubs):
3236 sub = wctx.sub(s)
3241 sub = wctx.sub(s)
3237 self.ui.status(
3242 self.ui.status(
3238 _(b'committing subrepository %s\n')
3243 _(b'committing subrepository %s\n')
3239 % uipathfn(subrepoutil.subrelpath(sub))
3244 % uipathfn(subrepoutil.subrelpath(sub))
3240 )
3245 )
3241 sr = sub.commit(cctx._text, user, date)
3246 sr = sub.commit(cctx._text, user, date)
3242 newstate[s] = (newstate[s][0], sr)
3247 newstate[s] = (newstate[s][0], sr)
3243 subrepoutil.writestate(self, newstate)
3248 subrepoutil.writestate(self, newstate)
3244
3249
3245 p1, p2 = self.dirstate.parents()
3250 p1, p2 = self.dirstate.parents()
3246 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3251 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3247 try:
3252 try:
3248 self.hook(
3253 self.hook(
3249 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3254 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3250 )
3255 )
3251 with self.transaction(b'commit'):
3256 with self.transaction(b'commit'):
3252 ret = self.commitctx(cctx, True)
3257 ret = self.commitctx(cctx, True)
3253 # update bookmarks, dirstate and mergestate
3258 # update bookmarks, dirstate and mergestate
3254 bookmarks.update(self, [p1, p2], ret)
3259 bookmarks.update(self, [p1, p2], ret)
3255 cctx.markcommitted(ret)
3260 cctx.markcommitted(ret)
3256 ms.reset()
3261 ms.reset()
3257 except: # re-raises
3262 except: # re-raises
3258 if edited:
3263 if edited:
3259 self.ui.write(
3264 self.ui.write(
3260 _(b'note: commit message saved in %s\n') % msg_path
3265 _(b'note: commit message saved in %s\n') % msg_path
3261 )
3266 )
3262 self.ui.write(
3267 self.ui.write(
3263 _(
3268 _(
3264 b"note: use 'hg commit --logfile "
3269 b"note: use 'hg commit --logfile "
3265 b"%s --edit' to reuse it\n"
3270 b"%s --edit' to reuse it\n"
3266 )
3271 )
3267 % msg_path
3272 % msg_path
3268 )
3273 )
3269 raise
3274 raise
3270
3275
3271 def commithook(unused_success):
3276 def commithook(unused_success):
3272 # hack for command that use a temporary commit (eg: histedit)
3277 # hack for command that use a temporary commit (eg: histedit)
3273 # temporary commit got stripped before hook release
3278 # temporary commit got stripped before hook release
3274 if self.changelog.hasnode(ret):
3279 if self.changelog.hasnode(ret):
3275 self.hook(
3280 self.hook(
3276 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3281 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3277 )
3282 )
3278
3283
3279 self._afterlock(commithook)
3284 self._afterlock(commithook)
3280 return ret
3285 return ret
3281
3286
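    # Illustrative sketch, not part of the original module: a minimal
    # in-process call into commit(). `repo` is assumed to be an already
    # loaded localrepository with pending working-directory changes; no
    # editor is passed, so `text` is used verbatim.
    #
    #     node = repo.commit(
    #         text=b'fix parsing of empty manifests',
    #         user=b'Jane Doe <jane@example.com>',
    #     )
    #     if node is None:
    #         repo.ui.status(b'nothing changed\n')
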
    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        return commit.commitctx(self, ctx, error=error, origctx=origctx)

    @unfilteredmethod
    def destroying(self):
        """Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        """
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        """Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        """
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(
        self,
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(
            node2, match, ignored, clean, unknown, listsubrepos
        )

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

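    # Illustrative sketch, not part of the original module: registering a
    # hypothetical post-dirstate-status callback. As the docstring above
    # requires, the callback reads the dirstate through wctx.repo() rather
    # than capturing a cached copy.
    #
    #     def fixups_done(wctx, status):
    #         wctx.repo().ui.debug(
    #             b'%d files modified after status\n' % len(status.modified)
    #         )
    #
    #     repo.addpostdsstatus(fixups_done)
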
    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        """return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        """
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

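    # Illustrative sketch, not part of the original module: printing the
    # open heads of the current branch, newest first, as documented above.
    #
    #     for node in repo.branchheads():
    #         repo.ui.write(b'%s\n' % hex(node))
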
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != self.nullid or p[0] == self.nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != self.nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

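    # Note on between() (added commentary, not part of the original module):
    # for each (top, bottom) pair the loop above walks first parents from
    # `top` towards `bottom` and records nodes at exponentially growing
    # distances (1, 2, 4, 8, ...). This sparse sampling is what the legacy
    # 'between' wire-protocol command relies on to narrow down a common
    # ancestor without transferring the whole parent chain.
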
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object; the registered hooks are called with
        a pushop (which carries repo, remote and outgoing attributes) before
        changesets are pushed.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook(unused_success):
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        self._afterlock(runhook)
        return ret

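    # Illustrative sketch, not part of the original module: pushing a
    # bookmark through the generic pushkey mechanism. For the b'bookmarks'
    # namespace the values are hex nodes, with b'' meaning "not previously
    # set"; `newnode` is a hypothetical binary node id.
    #
    #     ok = repo.pushkey(b'bookmarks', b'feature-x', b'', hex(newnode))
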
    def listkeys(self, namespace):
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def savecommitmessage(self, text):
        fp = self.vfs(b'last-message.txt', b'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1 :])

    def register_wanted_sidedata(self, category):
        if repository.REPO_FEATURE_SIDE_DATA not in self.features:
            # Only revlogv2 repos can want sidedata.
            return
        self._wanted_sidedata.add(pycompat.bytestr(category))

    def register_sidedata_computer(
        self, kind, category, keys, computer, flags, replace=False
    ):
        if kind not in revlogconst.ALL_KINDS:
            msg = _(b"unexpected revlog kind '%s'.")
            raise error.ProgrammingError(msg % kind)
        category = pycompat.bytestr(category)
        already_registered = category in self._sidedata_computers.get(kind, [])
        if already_registered and not replace:
            msg = _(
                b"cannot register a sidedata computer twice for category '%s'."
            )
            raise error.ProgrammingError(msg % category)
        if replace and not already_registered:
            msg = _(
                b"cannot replace a sidedata computer that isn't registered "
                b"for category '%s'."
            )
            raise error.ProgrammingError(msg % category)
        self._sidedata_computers.setdefault(kind, {})
        self._sidedata_computers[kind][category] = (keys, computer, flags)


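# Illustrative sketch, not part of the original module: how an extension
# might use the two register_* methods above. The category b'exp-demo',
# the key SD_DEMO and the computer below are hypothetical; computers are
# expected to return a sidedata mapping plus the flags to add and remove.
#
#     def compute_demo(repo, store, rev, sidedata):
#         return {SD_DEMO: b'payload'}, (0, 0)
#
#     repo.register_wanted_sidedata(b'exp-demo')
#     repo.register_sidedata_computer(
#         revlogconst.KIND_CHANGELOG, b'exp-demo', (SD_DEMO,), compute_demo, 0
#     )

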
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]

    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except FileNotFoundError: # journal file does not yet exist
                pass

    return a


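# Illustrative sketch, not part of the original module: `files` is an
# iterable of (vfs, src, dest) triples, so the callback produced here
# renames each journal file to its undo counterpart once the transaction
# is closed, e.g.:
#
#     post = aftertrans([(repo.svfs, b'journal', b'undo')])
#     # `post` is then handed to the transaction to run after closing.

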
def undoname(fn: bytes) -> bytes:
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))


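# For example (doctest-style, added commentary; results shown with POSIX
# path separators):
#
#     >>> undoname(b'store/journal')
#     b'store/undo'
#     >>> undoname(b'store/journal.phaseroots')
#     b'store/undo.phaseroots'

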
def instance(ui, path: bytes, create, intents=None, createopts=None):
    # prevent cyclic import localrepo -> upgrade -> localrepo
    from . import upgrade

    localpath = urlutil.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    def repo_maker():
        return makelocalrepository(ui, localpath, intents=intents)

    repo = repo_maker()
    repo = upgrade.may_auto_upgrade(repo, repo_maker)
    return repo


def islocal(path: bytes) -> bool:
    return True


def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts


def clone_requirements(ui, createopts, srcrepo):
    """clone the requirements of a local repo for a local clone

    The store requirements are unchanged while the working copy requirements
    depend on the configuration
    """
    target_requirements = set()
    if not srcrepo.requirements:
        # this is a legacy revlog "v0" repository, we cannot do anything fancy
        # with it.
        return target_requirements
    createopts = defaultcreateopts(ui, createopts=createopts)
    for r in newreporequirements(ui, createopts):
        if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
            target_requirements.add(r)

    for r in srcrepo.requirements:
        if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
            target_requirements.add(r)
    return target_requirements


def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
    if ui.configbool(b'format', b'usestore'):
        requirements.add(requirementsmod.STORE_REQUIREMENT)
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)

    compengines = ui.configlist(b'format', b'revlog-compression')
    for compengine in compengines:
        if compengine in util.compengines:
            engine = util.compengines[compengine]
            if engine.available() and engine.revlogheader():
                break
    else:
        raise error.Abort(
            _(
                b'compression engines %s defined by '
                b'format.revlog-compression not available'
            )
            % b', '.join(b'"%s"' % e for e in compengines),
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.use-dirstate-v2
    # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
    if ui.configbool(b'format', b'use-dirstate-v2'):
        requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)

    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
        requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)

    changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
    if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'use-internal-phase'):
        requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)

    # experimental config: format.exp-archived-phase
    if ui.configbool(b'format', b'exp-archived-phase'):
        requirements.add(requirementsmod.ARCHIVED_PHASE_REQUIREMENT)

    if createopts.get(b'narrowfiles'):
        requirements.add(requirementsmod.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)

    if ui.configbool(b'format', b'use-persistent-nodemap'):
        requirements.add(requirementsmod.NODEMAP_REQUIREMENT)

    # if share-safe is enabled, let's create the new repository with the new
    # requirement
    if ui.configbool(b'format', b'use-share-safe'):
        requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)

    # if we are creating a share-repo¹ we have to handle requirements
    # differently.
    #
    # [1] (i.e. reusing the store from another repository, just having a
    # working copy)
    if b'sharedrepo' in createopts:
        source_requirements = set(createopts[b'sharedrepo'].requirements)

        if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
            # share to an old school repository, we have to copy the
            # requirements and hope for the best.
            requirements = source_requirements
        else:
            # We have control on the working copy only, so "copy" the non
            # working copy part over, ignoring previous logic.
            to_drop = set()
            for req in requirements:
                if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
                    continue
                if req in source_requirements:
                    continue
                to_drop.add(req)
            requirements -= to_drop
            requirements |= source_requirements

        if createopts.get(b'sharedrelative'):
            requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
        else:
            requirements.add(requirementsmod.SHARED_REQUIREMENT)

    if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
        version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
        msg = _(b"ignoring unknown tracked key version: %d\n")
        hint = _(
            b"see `hg help config.format.use-dirstate-tracked-hint-version`"
        )
        if version != 1:
            ui.warn(msg % version, hint=hint)
        else:
            requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)

    return requirements


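# Illustrative sketch, not part of the original module: computing the
# requirements a default configuration would produce. The exact contents
# depend on the ui configuration and the Mercurial version (typically
# revlogv1 plus store, fncache, dotencode, generaldelta and sparserevlog).
#
#     reqs = newreporequirements(ui, defaultcreateopts(ui))

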
def checkrequirementscompat(ui, requirements):
    """Checks compatibility of repository requirements enabled and disabled.

    Returns a set of requirements which need to be dropped because dependent
    requirements are not enabled. Also warns users about it."""

    dropped = set()

    if requirementsmod.STORE_REQUIREMENT not in requirements:
        if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b'ignoring enabled \'format.bookmarks-in-store\' config '
                    b'because it is incompatible with disabled '
                    b'\'format.usestore\' config\n'
                )
            )
            dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)

        if (
            requirementsmod.SHARED_REQUIREMENT in requirements
            or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
        ):
            raise error.Abort(
                _(
                    b"cannot create shared repository as source was created"
                    b" with 'format.usestore' config disabled"
                )
            )

        if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
            if ui.hasconfig(b'format', b'use-share-safe'):
                msg = _(
                    b"ignoring enabled 'format.use-share-safe' config because "
                    b"it is incompatible with disabled 'format.usestore'"
                    b" config\n"
                )
                ui.warn(msg)
            dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return dropped


def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        b'backend',
        b'lfs',
        b'narrowfiles',
        b'sharedrepo',
        b'sharedrelative',
        b'shareditems',
        b'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}


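# Illustrative sketch, not part of the original module: an extension that
# supports a hypothetical b'myopt' creation option would wrap the function
# above and strip the key it knows how to handle.
#
#     def _filtered(orig, ui, createopts):
#         unknown = orig(ui, createopts)
#         unknown.pop(b'myopt', None)
#         return unknown
#
#     extensions.wrapfunction(localrepo, 'filterknowncreateopts', _filtered)

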
def createrepository(ui, path: bytes, createopts=None, requirements=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.
    ``requirements`` predefined set of requirements.
        (incompatible with ``createopts``)

    The following keys for ``createopts`` are recognized:

    backend
       The storage backend to use.
    lfs
       Repository will be created with ``lfs`` requirement. The lfs extension
       will automatically be loaded when the repository is accessed.
    narrowfiles
       Set up repository to support narrow file storage.
    sharedrepo
       Repository object from which storage should be shared.
    sharedrelative
       Boolean indicating if the path to the shared repo should be
       stored as relative. By default, the pointer to the "parent" repo
       is stored as an absolute path.
    shareditems
       Set of items to share to the new repository (in addition to storage).
    shallowfilestore
       Indicates that storage for files should be shallow (not all ancestor
       revisions are known).
    """

    if requirements is not None:
        if createopts is not None:
            msg = b'cannot specify both createopts and requirements'
            raise error.ProgrammingError(msg)
        createopts = {}
    else:
        createopts = defaultcreateopts(ui, createopts=createopts)

        unknownopts = filterknowncreateopts(ui, createopts)

        if not isinstance(unknownopts, dict):
            raise error.ProgrammingError(
                b'filterknowncreateopts() did not return a dict'
            )

        if unknownopts:
            raise error.Abort(
                _(
                    b'unable to create repository because of unknown '
                    b'creation option: %s'
                )
                % b', '.join(sorted(unknownopts)),
                hint=_(b'is a required extension not loaded?'),
            )

        requirements = newreporequirements(ui, createopts=createopts)
        requirements -= checkrequirementscompat(ui, requirements)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_(b'repository %s already exists') % path)

    if b'sharedrepo' in createopts:
        sharedpath = createopts[b'sharedrepo'].sharedpath

        if createopts.get(b'sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
                sharedpath = util.pconvert(sharedpath)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
    hgvfs.mkdir(b'wcache')

    has_store = requirementsmod.STORE_REQUIREMENT in requirements
    if has_store and b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 65535, which won't be recognized by
        # such old clients.
        hgvfs.append(
            b'00changelog.i',
            b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
            b'layout',
        )

    # Filter the requirements into working copy and store ones
    wcreq, storereq = scmutil.filterrequirements(requirements)
    # write working copy ones
    scmutil.writerequires(hgvfs, wcreq)
    # If there are store requirements and the current repository
    # is not a shared one, write stored requirements
    # For new shared repository, we don't need to write the store
    # requirements as they are already present in store requires
    if storereq and b'sharedrepo' not in createopts:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
        scmutil.writerequires(storevfs, storereq)

    # Write out file telling readers where to find the shared store.
    if b'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get(b'shareditems'):
        shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)


def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # has all attribute lookups result in error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository:
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError(
                b'repo instances should not be used after unshare'
            )

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, '__class__', poisonedrepository)
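
# Illustrative sketch, not part of the original module: after poisoning,
# close() keeps working but any other attribute access raises
# ProgrammingError.
#
#     poisonrepository(repo)
#     repo.close()  # still allowed
#     repo.root     # raises error.ProgrammingError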