# repository.py - Interfaces and base classes for repositories and peers.
# coding: utf-8
#
# Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.


from ..i18n import _
from .. import error
from . import util as interfaceutil

# Local repository feature string.

# Revlogs are being used for file storage.
REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
# The storage part of the repository is shared from an external source.
REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
# LFS supported for backing file storage.
REPO_FEATURE_LFS = b'lfs'
# Repository supports being stream cloned.
REPO_FEATURE_STREAM_CLONE = b'streamclone'
# Repository supports (at least) some sidedata to be stored.
REPO_FEATURE_SIDE_DATA = b'side-data'
# Files storage may lack data for all ancestors.
REPO_FEATURE_SHALLOW_FILE_STORAGE = b'shallowfilestorage'

REVISION_FLAG_CENSORED = 1 << 15
REVISION_FLAG_ELLIPSIS = 1 << 14
REVISION_FLAG_EXTSTORED = 1 << 13
REVISION_FLAG_HASCOPIESINFO = 1 << 12

REVISION_FLAGS_KNOWN = (
    REVISION_FLAG_CENSORED
    | REVISION_FLAG_ELLIPSIS
    | REVISION_FLAG_EXTSTORED
    | REVISION_FLAG_HASCOPIESINFO
)
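
# Illustrative sketch (not part of the original module): a consumer that
# receives revision flags over the wire might validate them against
# REVISION_FLAGS_KNOWN before applying them. Variable names are hypothetical.
#
#   flags = REVISION_FLAG_CENSORED | REVISION_FLAG_HASCOPIESINFO
#   unknown = flags & ~REVISION_FLAGS_KNOWN
#   if unknown:
#       raise error.Abort(b'unknown revision flags: %d' % unknown)
#   if flags & REVISION_FLAG_CENSORED:
#       pass  # content has been censored; expect a tombstone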

CG_DELTAMODE_STD = b'default'
CG_DELTAMODE_PREV = b'previous'
CG_DELTAMODE_FULL = b'fulltext'
CG_DELTAMODE_P1 = b'p1'


## Cache related constants:
#
# Used to control which cache should be warmed in a repo.updatecaches(…) call.

# Warm branchmaps of all known repoview's filter-levels
CACHE_BRANCHMAP_ALL = b"branchmap-all"
# Warm branchmaps of the repoview's filter-level used by servers
CACHE_BRANCHMAP_SERVED = b"branchmap-served"
# Warm internal changelog cache (eg: persistent nodemap)
CACHE_CHANGELOG_CACHE = b"changelog-cache"
# Warm full manifest cache
CACHE_FULL_MANIFEST = b"full-manifest"
# Warm file-node-tags cache
CACHE_FILE_NODE_TAGS = b"file-node-tags"
# Warm internal manifestlog cache (eg: persistent nodemap)
CACHE_MANIFESTLOG_CACHE = b"manifestlog-cache"
# Warm rev branch cache
CACHE_REV_BRANCH = b"rev-branch-cache"
# Warm tags' cache for the default repoview
CACHE_TAGS_DEFAULT = b"tags-default"
# Warm tags' cache for the repoview's filter-level used by servers
CACHE_TAGS_SERVED = b"tags-served"

# the caches to warm by default after a simple transaction
# (this is a mutable set to let extensions update it)
CACHES_DEFAULT = {
    CACHE_BRANCHMAP_SERVED,
}

# the caches to warm when warming all of them
# (this is a mutable set to let extensions update it)
CACHES_ALL = {
    CACHE_BRANCHMAP_SERVED,
    CACHE_BRANCHMAP_ALL,
    CACHE_CHANGELOG_CACHE,
    CACHE_FILE_NODE_TAGS,
    CACHE_FULL_MANIFEST,
    CACHE_MANIFESTLOG_CACHE,
    CACHE_TAGS_DEFAULT,
    CACHE_TAGS_SERVED,
}

# the caches to warm by default after a clone
# (this is a mutable set to let extensions update it)
CACHES_POST_CLONE = CACHES_ALL.copy()
CACHES_POST_CLONE.discard(CACHE_FILE_NODE_TAGS)
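
# Illustrative sketch (not part of the original module): because the CACHES_*
# sets are mutable, a hypothetical extension could register its own cache name
# so that repo.updatecaches(...) warms it after each transaction:
#
#   CACHE_MYEXT_INDEX = b"myext-index"  # hypothetical constant
#   CACHES_DEFAULT.add(CACHE_MYEXT_INDEX)
#   CACHES_ALL.add(CACHE_MYEXT_INDEX)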


class ipeerconnection(interfaceutil.Interface):
    """Represents a "connection" to a repository.

    This is the base interface for representing a connection to a repository.
    It holds basic properties and methods applicable to all peer types.

    This is not a complete interface definition and should not be used
    outside of this module.
    """

    ui = interfaceutil.Attribute("""ui.ui instance""")
    path = interfaceutil.Attribute("""a urlutil.path instance or None""")

    def url():
        """Returns a URL string representing this peer.

        Currently, implementations expose the raw URL used to construct the
        instance. It may contain credentials as part of the URL. The
        expectations of the value aren't well-defined and this could lead to
        data leakage.

        TODO audit/clean consumers and more clearly define the contents of this
        value.
        """

    def local():
        """Returns a local repository instance.

        If the peer represents a local repository, returns an object that
        can be used to interface with it. Otherwise returns ``None``.
        """

    def canpush():
        """Returns a boolean indicating if this peer can be pushed to."""

    def close():
        """Close the connection to this peer.

        This is called when the peer will no longer be used. Resources
        associated with the peer should be cleaned up.
        """


class ipeercapabilities(interfaceutil.Interface):
    """Peer sub-interface related to capabilities."""

    def capable(name):
        """Determine support for a named capability.

        Returns ``False`` if the capability is not supported.

        Returns ``True`` if a boolean capability is supported. Returns a string
        if capability support is non-boolean.

        Capability strings may or may not map to wire protocol capabilities.
        """

    def requirecap(name, purpose):
        """Require a capability to be present.

        Raises a ``CapabilityError`` if the capability isn't present.
        """


class ipeercommands(interfaceutil.Interface):
    """Client-side interface for communicating over the wire protocol.

    This interface is used as a gateway to the Mercurial wire protocol.
    Methods commonly call wire protocol commands of the same name.
    """

    def branchmap():
        """Obtain heads in named branches.

        Returns a dict mapping branch name to an iterable of nodes that are
        heads on that branch.
        """

    def capabilities():
        """Obtain capabilities of the peer.

        Returns a set of string capabilities.
        """

    def clonebundles():
        """Obtains the clone bundles manifest for the repo.

        Returns the manifest as unparsed bytes.
        """

    def debugwireargs(one, two, three=None, four=None, five=None):
        """Used to facilitate debugging of arguments passed over the wire."""

    def getbundle(source, **kwargs):
        """Obtain remote repository data as a bundle.

        This command is how the bulk of repository data is transferred from
        the peer to the local repository.

        Returns a generator of bundle data.
        """

    def heads():
        """Determine all known head revisions in the peer.

        Returns an iterable of binary nodes.
        """

    def known(nodes):
        """Determine whether multiple nodes are known.

        Accepts an iterable of nodes whose presence to check for.

        Returns an iterable of booleans indicating whether the corresponding
        node at that index is known to the peer.
        """

    def listkeys(namespace):
        """Obtain all keys in a pushkey namespace.

        Returns an iterable of key names.
        """

    def lookup(key):
        """Resolve a value to a known revision.

        Returns a binary node of the resolved revision on success.
        """

    def pushkey(namespace, key, old, new):
        """Set a value using the ``pushkey`` protocol.

        Arguments correspond to the pushkey namespace and key to operate on and
        the old and new values for that key.

        Returns a string with the peer result. The value inside varies by the
        namespace.
        """

    def stream_out():
        """Obtain streaming clone data.

        Successful result should be a generator of data chunks.
        """

    def unbundle(bundle, heads, url):
        """Transfer repository data to the peer.

        This is how the bulk of data during a push is transferred.

        Returns the integer number of heads added to the peer.
        """


class ipeerlegacycommands(interfaceutil.Interface):
    """Interface for implementing support for legacy wire protocol commands.

    Wire protocol commands transition to legacy status when they are no longer
    used by modern clients. To facilitate identifying which commands are
    legacy, the interfaces are split.
    """

    def between(pairs):
        """Obtain nodes between pairs of nodes.

        ``pairs`` is an iterable of node pairs.

        Returns an iterable of iterables of nodes corresponding to each
        requested pair.
        """

    def branches(nodes):
        """Obtain ancestor changesets of specific nodes back to a branch point.

        For each requested node, the peer finds the first ancestor node that is
        a DAG root or is a merge.

        Returns an iterable of iterables with the resolved values for each node.
        """

    def changegroup(nodes, source):
        """Obtain a changegroup with data for descendants of specified nodes."""

    def changegroupsubset(bases, heads, source):
        pass


class ipeercommandexecutor(interfaceutil.Interface):
    """Represents a mechanism to execute remote commands.

    This is the primary interface for requesting that wire protocol commands
    be executed. Instances of this interface are active in a context manager
    and have a well-defined lifetime. When the context manager exits, all
    outstanding requests are waited on.
    """

    def callcommand(name, args):
        """Request that a named command be executed.

        Receives the command name and a dictionary of command arguments.

        Returns a ``concurrent.futures.Future`` that will resolve to the
        result of that command request. That exact value is left up to
        the implementation and possibly varies by command.

        Not all commands can coexist with other commands in an executor
        instance: it depends on the underlying wire protocol transport being
        used and the command itself.

        Implementations MAY call ``sendcommands()`` automatically if the
        requested command cannot coexist with other commands in this executor.

        Implementations MAY call ``sendcommands()`` automatically when the
        future's ``result()`` is called. So, consumers using multiple
        commands with an executor MUST ensure that ``result()`` is not called
        until all command requests have been issued.
        """

    def sendcommands():
        """Trigger submission of queued command requests.

        Not all transports submit commands as soon as they are requested to
        run. When called, this method forces queued command requests to be
        issued. It will no-op if all commands have already been sent.

        When called, no more new commands may be issued with this executor.
        """

    def close():
        """Signal that this command request is finished.

        When called, no more new commands may be issued. All outstanding
        commands that have previously been issued are waited on before
        returning. This not only includes waiting for the futures to resolve,
        but also waiting for all response data to arrive. In other words,
        calling this waits for all on-wire state for issued command requests
        to finish.

        When used as a context manager, this method is called when exiting the
        context manager.

        This method may call ``sendcommands()`` if there are buffered commands.
        """


class ipeerrequests(interfaceutil.Interface):
    """Interface for executing commands on a peer."""

    limitedarguments = interfaceutil.Attribute(
        """True if the peer cannot receive large argument values for commands."""
    )

    def commandexecutor():
        """A context manager that resolves to an ipeercommandexecutor.

        The object this resolves to can be used to issue command requests
        to the peer.

        Callers should call its ``callcommand`` method to issue command
        requests.

        A new executor should be obtained for each distinct set of commands
        (possibly just a single command) that the consumer wants to execute
        as part of a single operation or round trip. This is because some
        peers are half-duplex and/or don't support persistent connections.
        e.g. in the case of HTTP peers, commands sent to an executor represent
        a single HTTP request. While some peers may support multiple command
        sends over the wire per executor, consumers need to code to the least
        capable peer. So it should be assumed that command executors buffer
        called commands until they are told to send them and that each
        command executor could result in a new connection or wire-level request
        being issued.
        """


class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
    """Unified interface for peer repositories.

    All peer instances must conform to this interface.
    """


class ipeerv2(ipeerconnection, ipeercapabilities, ipeerrequests):
    """Unified peer interface for wire protocol version 2 peers."""

    apidescriptor = interfaceutil.Attribute(
        """Data structure holding description of server API."""
    )


@interfaceutil.implementer(ipeerbase)
class peer:
    """Base class for peer repositories."""

    limitedarguments = False

    def __init__(self, ui, path=None):
        self.ui = ui
        self.path = path

    def capable(self, name):
        # Boolean capabilities are present by name; valued capabilities are
        # advertised as ``name=value`` strings.
        caps = self.capabilities()
        if name in caps:
            return True

        name = b'%s=' % name
        for cap in caps:
            if cap.startswith(name):
                return cap[len(name) :]

        return False

    def requirecap(self, name, purpose):
        if self.capable(name):
            return

        raise error.CapabilityError(
            _(
                b'cannot %s; remote repository does not support the '
                b'\'%s\' capability'
            )
            % (purpose, name)
        )
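
    # Illustrative sketch (not part of the original module): given the
    # hypothetical capability set {b'branchmap', b'unbundle=HG10GZ,HG10BZ'},
    # ``capable()`` behaves as follows:
    #
    #   peer.capable(b'branchmap')  # -> True (boolean capability)
    #   peer.capable(b'unbundle')   # -> b'HG10GZ,HG10BZ' (valued capability)
    #   peer.capable(b'missing')    # -> False (not advertised)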


class iverifyproblem(interfaceutil.Interface):
    """Represents a problem with the integrity of the repository.

    Instances of this interface are emitted to describe an integrity issue
    with a repository (e.g. corrupt storage, missing data, etc).

    Instances are essentially messages associated with severity.
    """

    warning = interfaceutil.Attribute(
        """Message indicating a non-fatal problem."""
    )

    error = interfaceutil.Attribute("""Message indicating a fatal problem.""")

    node = interfaceutil.Attribute(
        """Revision encountering the problem.

        ``None`` means the problem doesn't apply to a single revision.
        """
    )


class irevisiondelta(interfaceutil.Interface):
    """Represents a delta between one revision and another.

    Instances convey enough information to allow a revision to be exchanged
    with another repository.

    Instances represent the fulltext revision data or a delta against
    another revision. Therefore the ``revision`` and ``delta`` attributes
    are mutually exclusive.

    Typically used for changegroup generation.
    """

    node = interfaceutil.Attribute("""20 byte node of this revision.""")

    p1node = interfaceutil.Attribute(
        """20 byte node of 1st parent of this revision."""
    )

    p2node = interfaceutil.Attribute(
        """20 byte node of 2nd parent of this revision."""
    )

    linknode = interfaceutil.Attribute(
        """20 byte node of the changelog revision this node is linked to."""
    )

    flags = interfaceutil.Attribute(
        """2 bytes of integer flags that apply to this revision.

        This is a bitwise composition of the ``REVISION_FLAG_*`` constants.
        """
    )

    basenode = interfaceutil.Attribute(
        """20 byte node of the revision this data is a delta against.

        ``nullid`` indicates that the revision is a full revision and not
        a delta.
        """
    )

    baserevisionsize = interfaceutil.Attribute(
        """Size of base revision this delta is against.

        May be ``None`` if ``basenode`` is ``nullid``.
        """
    )

    revision = interfaceutil.Attribute(
        """Raw fulltext of revision data for this node."""
    )

    delta = interfaceutil.Attribute(
        """Delta between ``basenode`` and ``node``.

        Stored in the bdiff delta format.
        """
    )

    sidedata = interfaceutil.Attribute(
        """Raw sidedata bytes for the given revision."""
    )

    protocol_flags = interfaceutil.Attribute(
        """Single byte of integer flags that can influence the protocol.

        This is a bitwise composition of the ``storageutil.CG_FLAG*`` constants.
        """
    )


class ifilerevisionssequence(interfaceutil.Interface):
    """Contains index data for all revisions of a file.

    Types implementing this behave like lists of tuples. The index
    in the list corresponds to the revision number. The values contain
    index metadata.

    The *null* revision (revision number -1) is always the last item
    in the index.
    """

    def __len__():
        """The total number of revisions."""

    def __getitem__(rev):
        """Returns the object having a specific revision number.

        Returns an 8-tuple with the following fields:

        offset+flags
            Contains the offset and flags for the revision. 64-bit unsigned
            integer where first 6 bytes are the offset and the next 2 bytes
            are flags. The offset can be 0 if it is not used by the store.
        compressed size
            Size of the revision data in the store. It can be 0 if it isn't
            needed by the store.
        uncompressed size
            Fulltext size. It can be 0 if it isn't needed by the store.
        base revision
            Revision number of revision the delta for storage is encoded
            against. -1 indicates not encoded against a base revision.
        link revision
            Revision number of changelog revision this entry is related to.
        p1 revision
            Revision number of 1st parent. -1 if no 1st parent.
        p2 revision
            Revision number of 2nd parent. -1 if no 2nd parent.
        node
            Binary node value for this revision number.

        Negative values should index off the end of the sequence. ``-1``
        should return the null revision. ``-2`` should return the most
        recent revision.
        """

    def __contains__(rev):
        """Whether a revision number exists."""

    def insert(self, i, entry):
        """Add an item to the index at specific revision."""


class ifileindex(interfaceutil.Interface):
    """Storage interface for index data of a single file.

    File storage data is divided into index metadata and data storage.
    This interface defines the index portion of the interface.

    The index logically consists of:

    * A mapping between revision numbers and nodes.
    * DAG data (storing and querying the relationship between nodes).
    * Metadata to facilitate storage.
    """

    nullid = interfaceutil.Attribute(
        """node for the null revision for use as delta base."""
    )

    def __len__():
        """Obtain the number of revisions stored for this file."""

    def __iter__():
        """Iterate over revision numbers for this file."""

    def hasnode(node):
        """Returns a bool indicating if a node is known to this store.

        Implementations must only return True for full, binary node values:
        hex nodes, revision numbers, and partial node matches must be
        rejected.

        The null node is never present.
        """

    def revs(start=0, stop=None):
        """Iterate over revision numbers for this file, with control."""

    def parents(node):
        """Returns a 2-tuple of parent nodes for a revision.

        Values will be ``nullid`` if the parent is empty.
        """

    def parentrevs(rev):
        """Like parents() but operates on revision numbers."""

    def rev(node):
        """Obtain the revision number given a node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def node(rev):
        """Obtain the node value given a revision number.

        Raises ``IndexError`` if the node is not known.
        """

    def lookup(node):
        """Attempt to resolve a value to a node.

        Value can be a binary node, hex node, revision number, or a string
        that can be converted to an integer.

        Raises ``error.LookupError`` if a node could not be resolved.
        """

    def linkrev(rev):
        """Obtain the changeset revision number a revision is linked to."""

    def iscensored(rev):
        """Return whether a revision's content has been censored."""

    def commonancestorsheads(node1, node2):
        """Obtain an iterable of nodes containing heads of common ancestors.

        See ``ancestor.commonancestorsheads()``.
        """

    def descendants(revs):
        """Obtain descendant revision numbers for a set of revision numbers.

        If ``nullrev`` is in the set, this is equivalent to ``revs()``.
        """

    def heads(start=None, stop=None):
        """Obtain a list of nodes that are DAG heads, with control.

        The set of revisions examined can be limited by specifying
        ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
        iterable of nodes. DAG traversal starts at earlier revision
        ``start`` and iterates forward until any node in ``stop`` is
        encountered.
        """

    def children(node):
        """Obtain nodes that are children of a node.

        Returns a list of nodes.
        """


class ifiledata(interfaceutil.Interface):
    """Storage interface for data storage of a specific file.

    This complements ``ifileindex`` and provides an interface for accessing
    data for a tracked file.
    """

    def size(rev):
        """Obtain the fulltext size of file data.

        Any metadata is excluded from size measurements.
        """

    def revision(node, raw=False):
        """Obtain fulltext data for a node.

        By default, any storage transformations are applied before the data
        is returned. If ``raw`` is True, non-raw storage transformations
        are not applied.

        The fulltext data may contain a header containing metadata. Most
        consumers should use ``read()`` to obtain the actual file data.
        """

    def rawdata(node):
        """Obtain raw data for a node."""

    def read(node):
        """Resolve file fulltext data.

        This is similar to ``revision()`` except any metadata in the data
        headers is stripped.
        """

    def renamed(node):
        """Obtain copy metadata for a node.

        Returns ``False`` if no copy metadata is stored or a 2-tuple of
        (path, node) from which this revision was copied.
        """

    def cmp(node, fulltext):
        """Compare fulltext to another revision.

        Returns True if the fulltext is different from what is stored.

        This takes copy metadata into account.

        TODO better document the copy metadata and censoring logic.
        """

    def emitrevisions(
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
        deltamode=CG_DELTAMODE_STD,
    ):
        """Produce ``irevisiondelta`` for revisions.

        Given an iterable of nodes, emits objects conforming to the
        ``irevisiondelta`` interface that describe revisions in storage.

        This method is a generator.

        The input nodes may be unordered. Implementations must ensure that a
        node's parents are emitted before the node itself. Transitively, this
        means that a node may only be emitted once all its ancestors in
        ``nodes`` have also been emitted.

        By default, emits "index" data (the ``node``, ``p1node``, and
        ``p2node`` attributes). If ``revisiondata`` is set, revision data
        will also be present on the emitted objects.

        With default argument values, implementations can choose to emit
        either fulltext revision data or a delta. When emitting deltas,
        implementations must consider whether the delta's base revision
        fulltext is available to the receiver.

        The base revision fulltext is guaranteed to be available if any of
        the following are met:

        * Its fulltext revision was emitted by this method call.
        * A delta for that revision was emitted by this method call.
        * ``assumehaveparentrevisions`` is True and the base revision is a
          parent of the node.

        ``nodesorder`` can be used to control the order that revisions are
        emitted. By default, revisions can be reordered as long as they are
        in DAG topological order (see above). If the value is ``nodes``,
        the iteration order from ``nodes`` should be used. If the value is
        ``storage``, then the native order from the backing storage layer
        is used. (Not all storage layers will have strong ordering and behavior
        of this mode is storage-dependent.) ``nodes`` ordering can force
        revisions to be emitted before their ancestors, so consumers should
        use it with care.

        The ``linknode`` attribute on the returned ``irevisiondelta`` may not
        be set and it is the caller's responsibility to resolve it, if needed.

        If ``deltamode`` is CG_DELTAMODE_PREV and revision data is requested,
        all revision data should be emitted as deltas against the revision
        emitted just prior. The initial revision should be a delta against its
        1st parent.
        """


class ifilemutation(interfaceutil.Interface):
    """Storage interface for mutation events of a tracked file."""

    def add(filedata, meta, transaction, linkrev, p1, p2):
        """Add a new revision to the store.

        Takes file data, dictionary of metadata, a transaction, linkrev,
        and parent nodes.

        Returns the node that was added.

        May no-op if a revision matching the supplied data is already stored.
        """

    def addrevision(
        revisiondata,
        transaction,
        linkrev,
        p1,
        p2,
        node=None,
        flags=0,
        cachedelta=None,
    ):
        """Add a new revision to the store and return its number.

        This is similar to ``add()`` except it operates at a lower level.

        The data passed in already contains a metadata header, if any.

        ``node`` and ``flags`` can be used to define the expected node and
        the flags to use with storage. ``flags`` is a bitwise value composed
        of the various ``REVISION_FLAG_*`` constants.

        ``add()`` is usually called when adding files from e.g. the working
        directory. ``addrevision()`` is often called by ``add()`` and for
        scenarios where revision data has already been computed, such as when
        applying raw data from a peer repo.
        """

    def addgroup(
        deltas,
        linkmapper,
        transaction,
        addrevisioncb=None,
        duplicaterevisioncb=None,
        maybemissingparents=False,
    ):
        """Process a series of deltas for storage.

        ``deltas`` is an iterable of 7-tuples of
        (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
        to add.

        The ``delta`` field contains ``mpatch`` data to apply to a base
        revision, identified by ``deltabase``. The base node can be
        ``nullid``, in which case the header from the delta can be ignored
        and the delta used as the fulltext.

        ``alwayscache`` instructs the lower layers to cache the content of the
        newly added revision, even if it needs to be explicitly computed.
        This used to be the default when ``addrevisioncb`` was provided up to
        Mercurial 5.8.

        ``addrevisioncb`` should be called for each new rev as it is committed.
        ``duplicaterevisioncb`` should be called for all revs with a
        pre-existing node.

        ``maybemissingparents`` is a bool indicating whether the incoming
        data may reference parents/ancestor revisions that aren't present.
        This flag is set when receiving data into a "shallow" store that
        doesn't hold all history.

        Returns a list of nodes that were processed. A node will be in the list
        even if it existed in the store previously.
        """

    def censorrevision(tr, node, tombstone=b''):
        """Remove the content of a single revision.

        The specified ``node`` will have its content purged from storage.
        Future attempts to access the revision data for this node will
        result in failure.

        A ``tombstone`` message can optionally be stored. This message may be
        displayed to users when they attempt to access the missing revision
        data.

        Storage backends may have stored deltas against the previous content
        in this revision. As part of censoring a revision, these storage
        backends are expected to rewrite any internally stored deltas such
        that they no longer reference the deleted content.
        """

    def getstrippoint(minlink):
        """Find the minimum revision that must be stripped to strip a linkrev.

        Returns a 2-tuple containing the minimum revision number and a set
        of all revision numbers that would be broken by this strip.

        TODO this is highly revlog centric and should be abstracted into
        a higher-level deletion API. ``repair.strip()`` relies on this.
        """
878
878
879 def strip(minlink, transaction):
879 def strip(minlink, transaction):
880 """Remove storage of items starting at a linkrev.
880 """Remove storage of items starting at a linkrev.
881
881
882 This uses ``getstrippoint()`` to determine the first node to remove.
882 This uses ``getstrippoint()`` to determine the first node to remove.
883 Then it effectively truncates storage for all revisions after that.
883 Then it effectively truncates storage for all revisions after that.
884
884
885 TODO this is highly revlog centric and should be abstracted into a
885 TODO this is highly revlog centric and should be abstracted into a
886 higher-level deletion API.
886 higher-level deletion API.
887 """
887 """


class ifilestorage(ifileindex, ifiledata, ifilemutation):
    """Complete storage interface for a single tracked file."""

    def files():
        """Obtain paths that are backing storage for this file.

        TODO this is used heavily by verify code and there should probably
        be a better API for that.
        """

    def storageinfo(
        exclusivefiles=False,
        sharedfiles=False,
        revisionscount=False,
        trackedsize=False,
        storedsize=False,
    ):
        """Obtain information about storage for this file's data.

        Returns a dict describing storage for this tracked path. The keys
        in the dict map to arguments of the same name. The arguments are bools
        indicating whether to calculate and obtain that data.

        exclusivefiles
           Iterable of (vfs, path) describing files that are exclusively
           used to back storage for this tracked path.

        sharedfiles
           Iterable of (vfs, path) describing files that are used to back
           storage for this tracked path. Those files may also provide storage
           for other stored entities.

        revisionscount
           Number of revisions available for retrieval.

        trackedsize
           Total size in bytes of all tracked revisions. This is a sum of the
           length of the fulltext of all revisions.

        storedsize
           Total size in bytes used to store data for all tracked revisions.
           This is commonly less than ``trackedsize`` due to internal usage
           of deltas rather than fulltext revisions.

        Not all storage backends may support all queries or have a reasonable
        value to use. In that case, the value should be set to ``None`` and
        callers are expected to handle this special value.
        """

    def verifyintegrity(state):
        """Verifies the integrity of file storage.

        ``state`` is a dict holding state of the verifier process. It can be
        used to communicate data between invocations of multiple storage
        primitives.

        If individual revisions cannot have their revision content resolved,
        the method is expected to set the ``skipread`` key to a set of nodes
        that encountered problems. If set, the method can also add the node(s)
        to ``safe_renamed`` in order to indicate nodes that may perform the
        rename checks with currently accessible data.

        The method yields objects conforming to the ``iverifyproblem``
        interface.
        """


class idirs(interfaceutil.Interface):
    """Interface representing a collection of directories from paths.

    This interface is essentially a derived data structure representing
    directories from a collection of paths.
    """

    def addpath(path):
        """Add a path to the collection.

        All directories in the path will be added to the collection.
        """

    def delpath(path):
        """Remove a path from the collection.

        If the removal was the last path in a particular directory, the
        directory is removed from the collection.
        """

    def __iter__():
        """Iterate over the directories in this collection of paths."""

    def __contains__(path):
        """Whether a specific directory is in this collection."""


class imanifestdict(interfaceutil.Interface):
    """Interface representing a manifest data structure.

    A manifest is effectively a dict mapping paths to entries. Each entry
    consists of a binary node and extra flags affecting that entry.
    """

    def __getitem__(path):
        """Returns the binary node value for a path in the manifest.

        Raises ``KeyError`` if the path does not exist in the manifest.

        Equivalent to ``self.find(path)[0]``.
        """

    def find(path):
        """Returns the entry for a path in the manifest.

        Returns a 2-tuple of (node, flags).

        Raises ``KeyError`` if the path does not exist in the manifest.
        """

    def __len__():
        """Return the number of entries in the manifest."""

    def __nonzero__():
        """Returns True if the manifest has entries, False otherwise."""

    __bool__ = __nonzero__

    def __setitem__(path, node):
        """Define the node value for a path in the manifest.

        If the path is already in the manifest, its flags will be copied to
        the new entry.
        """

    def __contains__(path):
        """Whether a path exists in the manifest."""

    def __delitem__(path):
        """Remove a path from the manifest.

        Raises ``KeyError`` if the path is not in the manifest.
        """

    def __iter__():
        """Iterate over paths in the manifest."""

    def iterkeys():
        """Iterate over paths in the manifest."""

    def keys():
        """Obtain a list of paths in the manifest."""

    def filesnotin(other, match=None):
        """Obtain the set of paths in this manifest but not in another.

        ``match`` is an optional matcher function to be applied to both
        manifests.

        Returns a set of paths.
        """

    def dirs():
        """Returns an object implementing the ``idirs`` interface."""

    def hasdir(dir):
        """Returns a bool indicating if a directory is in this manifest."""

    def walk(match):
        """Generator of paths in manifest satisfying a matcher.

        If the matcher has explicit files listed and they don't exist in
        the manifest, ``match.bad()`` is called for each missing file.
        """

    def diff(other, match=None, clean=False):
        """Find differences between this manifest and another.

        This manifest is compared to ``other``.

        If ``match`` is provided, the two manifests are filtered against this
        matcher and only entries satisfying the matcher are compared.

        If ``clean`` is True, unchanged files are included in the returned
        object.

        Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
        the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
        represents the node and flags for this manifest and ``(node2, flag2)``
        are the same for the other manifest.
        """

    def setflag(path, flag):
        """Set the flag value for a given path.

        Raises ``KeyError`` if the path is not already in the manifest.
        """

    def get(path, default=None):
        """Obtain the node value for a path or a default value if missing."""

    def flags(path):
        """Return the flags value for a path (default: empty bytestring)."""

    def copy():
        """Return a copy of this manifest."""

    def items():
        """Returns an iterable of (path, node) for items in this manifest."""

    def iteritems():
        """Identical to items()."""

    def iterentries():
        """Returns an iterable of (path, node, flags) for this manifest.

        Similar to ``iteritems()`` except items are a 3-tuple and include
        flags.
        """

    def text():
        """Obtain the raw data representation for this manifest.

        Result is used to create a manifest revision.
        """

    def fastdelta(base, changes):
        """Obtain a delta between this manifest and another given changes.

        ``base`` is the raw data representation for another manifest.

        ``changes`` is an iterable of ``(path, to_delete)``.

        Returns a 2-tuple containing ``bytearray(self.text())`` and the
        delta between ``base`` and this manifest.

        If this manifest implementation can't support ``fastdelta()``,
        raise ``mercurial.manifest.FastdeltaUnavailable``.
        """


class imanifestrevisionbase(interfaceutil.Interface):
    """Base interface representing a single revision of a manifest.

    Should not be used as a primary interface: should always be inherited
    as part of a larger interface.
    """

    def copy():
        """Obtain a copy of this manifest instance.

        Returns an object conforming to the ``imanifestrevisionwritable``
        interface. The instance will be associated with the same
        ``imanifestlog`` collection as this instance.
        """

    def read():
        """Obtain the parsed manifest data structure.

        The returned object conforms to the ``imanifestdict`` interface.
        """


class imanifestrevisionstored(imanifestrevisionbase):
    """Interface representing a manifest revision committed to storage."""

    def node():
        """The binary node for this manifest."""

    parents = interfaceutil.Attribute(
        """List of binary nodes that are parents for this manifest revision."""
    )

    def readdelta(shallow=False):
        """Obtain the manifest data structure representing changes from parent.

        This manifest is compared to its first parent. A new manifest
        representing those differences is constructed.

        The returned object conforms to the ``imanifestdict`` interface.
        """

    def readfast(shallow=False):
        """Calls either ``read()`` or ``readdelta()``.

        The faster of the two options is called.
        """

    def find(key):
        """Calls ``self.read().find(key)``.

        Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
        """


class imanifestrevisionwritable(imanifestrevisionbase):
    """Interface representing a manifest revision that can be committed."""

    def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
        """Add this revision to storage.

        Takes a transaction object, the changeset revision number it will
        be associated with, its parent nodes, and lists of added and
        removed paths.

        If match is provided, storage can choose not to inspect or write out
        items that do not match. Storage is still required to be able to provide
        the full manifest in the future for any directories written (these
        manifests should not be "narrowed on disk").

        Returns the binary node of the created revision.
        """


class imanifeststorage(interfaceutil.Interface):
    """Storage interface for manifest data."""

    nodeconstants = interfaceutil.Attribute(
        """nodeconstants used by the current repository."""
    )

    tree = interfaceutil.Attribute(
        """The path to the directory this manifest tracks.

        The empty bytestring represents the root manifest.
        """
    )

    index = interfaceutil.Attribute(
        """An ``ifilerevisionssequence`` instance."""
    )

    opener = interfaceutil.Attribute(
        """VFS opener to use to access underlying files used for storage.

        TODO this is revlog specific and should not be exposed.
        """
    )

    _generaldelta = interfaceutil.Attribute(
        """Whether generaldelta storage is being used.

        TODO this is revlog specific and should not be exposed.
        """
    )

    fulltextcache = interfaceutil.Attribute(
        """Dict with cache of fulltexts.

        TODO this doesn't feel appropriate for the storage interface.
        """
    )

    def __len__():
        """Obtain the number of revisions stored for this manifest."""

    def __iter__():
        """Iterate over revision numbers for this manifest."""

    def rev(node):
        """Obtain the revision number given a binary node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def node(rev):
        """Obtain the node value given a revision number.

        Raises ``error.LookupError`` if the revision is not known.
        """

    def lookup(value):
        """Attempt to resolve a value to a node.

        Value can be a binary node, hex node, revision number, or a bytes
        value that can be converted to an integer.

        Raises ``error.LookupError`` if a node could not be resolved.
        """

    def parents(node):
        """Returns a 2-tuple of parent nodes for a node.

        Values will be ``nullid`` if the parent is empty.
        """

    def parentrevs(rev):
        """Like parents() but operates on revision numbers."""

    def linkrev(rev):
        """Obtain the changeset revision number a revision is linked to."""

    def revision(node, _df=None):
        """Obtain fulltext data for a node."""

    def rawdata(node, _df=None):
        """Obtain raw data for a node."""

    def revdiff(rev1, rev2):
        """Obtain a delta between two revision numbers.

        The returned data is the result of ``bdiff.bdiff()`` on the raw
        revision data.
        """

    def cmp(node, fulltext):
        """Compare fulltext to another revision.

        Returns True if the fulltext is different from what is stored.
        """

    def emitrevisions(
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
    ):
        """Produce ``irevisiondelta`` describing revisions.

        See the documentation for ``ifiledata`` for more.
        """

    def addgroup(
        deltas,
        linkmapper,
        transaction,
        addrevisioncb=None,
        duplicaterevisioncb=None,
    ):
        """Process a series of deltas for storage.

        See the documentation in ``ifilemutation`` for more.
        """

    def rawsize(rev):
        """Obtain the size of tracked data.

        Is equivalent to ``len(m.rawdata(node))``.

        TODO this method is only used by upgrade code and may be removed.
        """

    def getstrippoint(minlink):
        """Find minimum revision that must be stripped to strip a linkrev.

        See the documentation in ``ifilemutation`` for more.
        """

    def strip(minlink, transaction):
        """Remove storage of items starting at a linkrev.

        See the documentation in ``ifilemutation`` for more.
        """

    def checksize():
        """Obtain the expected sizes of backing files.

        TODO this is used by verify and it should not be part of the interface.
        """

    def files():
        """Obtain paths that are backing storage for this manifest.

        TODO this is used by verify and there should probably be a better API
        for this functionality.
        """

    def deltaparent(rev):
        """Obtain the revision that a revision is delta'd against.

        TODO delta encoding is an implementation detail of storage and should
        not be exposed to the storage interface.
        """

    def clone(tr, dest, **kwargs):
        """Clone this instance to another."""

    def clearcaches(clear_persisted_data=False):
        """Clear any caches associated with this instance."""

    def dirlog(d):
        """Obtain a manifest storage instance for a tree."""

    def add(
        m, transaction, link, p1, p2, added, removed, readtree=None, match=None
    ):
        """Add a revision to storage.

        ``m`` is an object conforming to ``imanifestdict``.

        ``link`` is the linkrev revision number.

        ``p1`` and ``p2`` are the parent revision numbers.

        ``added`` and ``removed`` are iterables of added and removed paths,
        respectively.

        ``readtree`` is a function that can be used to read the child tree(s)
        when recursively writing the full tree structure when using
        treemanifests.

        ``match`` is a matcher that can be used to hint to storage that not all
        paths must be inspected; this is an optimization and can be safely
        ignored. Note that the storage must still be able to reproduce a full
        manifest including files that did not match.
        """

    def storageinfo(
        exclusivefiles=False,
        sharedfiles=False,
        revisionscount=False,
        trackedsize=False,
        storedsize=False,
    ):
        """Obtain information about storage for this manifest's data.

        See ``ifilestorage.storageinfo()`` for a description of this method.
        This one behaves the same way, except for manifest data.
        """


class imanifestlog(interfaceutil.Interface):
    """Interface representing a collection of manifest snapshots.

    Represents the root manifest in a repository.

    Also serves as a means to access nested tree manifests and to cache
    tree manifests.
    """

    nodeconstants = interfaceutil.Attribute(
        """nodeconstants used by the current repository."""
    )

    def __getitem__(node):
        """Obtain a manifest instance for a given binary node.

        Equivalent to calling ``self.get('', node)``.

        The returned object conforms to the ``imanifestrevisionstored``
        interface.
        """

    def get(tree, node, verify=True):
        """Retrieve the manifest instance for a given directory and binary node.

        ``node`` always refers to the node of the root manifest (which will be
        the only manifest if flat manifests are being used).

        If ``tree`` is the empty string, the root manifest is returned.
        Otherwise the manifest for the specified directory will be returned
        (requires tree manifests).

        If ``verify`` is True, ``LookupError`` is raised if the node is not
        known.

        The returned object conforms to the ``imanifestrevisionstored``
        interface.
        """

    def getstorage(tree):
        """Retrieve an interface to storage for a particular tree.

        If ``tree`` is the empty bytestring, storage for the root manifest will
        be returned. Otherwise storage for a tree manifest is returned.

        TODO formalize interface for returned object.
        """

    def clearcaches():
        """Clear caches associated with this collection."""

    def rev(node):
        """Obtain the revision number for a binary node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def update_caches(transaction):
        """Update whatever caches are relevant for the used storage."""


class ilocalrepositoryfilestorage(interfaceutil.Interface):
    """Local repository sub-interface providing access to tracked file storage.

    This interface defines how a repository accesses storage for a single
    tracked file path.
    """

    def file(f):
        """Obtain a filelog for a tracked path.

        The returned type conforms to the ``ifilestorage`` interface.
        """


class ilocalrepositorymain(interfaceutil.Interface):
    """Main interface for local repositories.

    This currently captures the reality of things - not how things should be.
    """

    nodeconstants = interfaceutil.Attribute(
        """Constant nodes matching the hash function used by the repository."""
    )
    nullid = interfaceutil.Attribute(
        """null revision for the hash function used by the repository."""
    )

    supported = interfaceutil.Attribute(
        """Set of requirements that this repo is capable of opening."""
    )

    requirements = interfaceutil.Attribute(
        """Set of requirements this repo uses."""
    )

    features = interfaceutil.Attribute(
        """Set of "features" this repository supports.

        A "feature" is a loosely-defined term. It can refer to a feature
        in the classical sense or can describe an implementation detail
        of the repository. For example, a ``readonly`` feature may denote
        the repository as read-only. Or a ``revlogfilestore`` feature may
        denote that the repository is using revlogs for file storage.

        The intent of features is to provide a machine-queryable mechanism
        for repo consumers to test for various repository characteristics.

        Features are similar to ``requirements``. The main difference is that
        requirements are stored on-disk and represent requirements to open the
        repository. Features are more run-time capabilities of the repository
        and more granular capabilities (which may be derived from requirements).
        """
    )

    filtername = interfaceutil.Attribute(
        """Name of the repoview that is active on this repo."""
    )

    vfs_map = interfaceutil.Attribute(
        """a bytes-key → vfs mapping used by transaction and others"""
    )

    wvfs = interfaceutil.Attribute(
        """VFS used to access the working directory."""
    )

    vfs = interfaceutil.Attribute(
        """VFS rooted at the .hg directory.

        Used to access repository data not in the store.
        """
    )

    svfs = interfaceutil.Attribute(
        """VFS rooted at the store.

        Used to access repository data in the store. Typically .hg/store.
        But can point elsewhere if the store is shared.
        """
    )

    root = interfaceutil.Attribute(
        """Path to the root of the working directory."""
    )

    path = interfaceutil.Attribute("""Path to the .hg directory.""")

    origroot = interfaceutil.Attribute(
        """The filesystem path that was used to construct the repo."""
    )

    auditor = interfaceutil.Attribute(
        """A pathauditor for the working directory.

        This checks if a path refers to a nested repository.

        Operates on the filesystem.
        """
    )

    nofsauditor = interfaceutil.Attribute(
        """A pathauditor for the working directory.

        This is like ``auditor`` except it doesn't do filesystem checks.
        """
    )

    baseui = interfaceutil.Attribute(
        """Original ui instance passed into constructor."""
    )

    ui = interfaceutil.Attribute("""Main ui instance for this instance.""")

    sharedpath = interfaceutil.Attribute(
        """Path to the .hg directory of the repo this repo was shared from."""
    )

    store = interfaceutil.Attribute("""A store instance.""")

    spath = interfaceutil.Attribute("""Path to the store.""")

    sjoin = interfaceutil.Attribute("""Alias to self.store.join.""")

    cachevfs = interfaceutil.Attribute(
        """A VFS used to access the cache directory.

        Typically .hg/cache.
        """
    )

    wcachevfs = interfaceutil.Attribute(
        """A VFS used to access the cache directory dedicated to working copy.

        Typically .hg/wcache.
        """
    )

    filteredrevcache = interfaceutil.Attribute(
        """Holds sets of revisions to be filtered."""
    )

    names = interfaceutil.Attribute("""A ``namespaces`` instance.""")

    filecopiesmode = interfaceutil.Attribute(
        """The way file copies should be dealt with in this repo."""
    )

    def close():
        """Close the handle on this repository."""

    def peer(path=None):
        """Obtain an object conforming to the ``peer`` interface."""

    def unfiltered():
        """Obtain an unfiltered/raw view of this repo."""

    def filtered(name, visibilityexceptions=None):
        """Obtain a named view of this repository."""

    obsstore = interfaceutil.Attribute("""A store of obsolescence data.""")

    changelog = interfaceutil.Attribute("""A handle on the changelog revlog.""")

    manifestlog = interfaceutil.Attribute(
        """An instance conforming to the ``imanifestlog`` interface.

        Provides access to manifests for the repository.
        """
    )

    dirstate = interfaceutil.Attribute("""Working directory state.""")

    narrowpats = interfaceutil.Attribute(
        """Matcher patterns for this repository's narrowspec."""
    )

    def narrowmatch(match=None, includeexact=False):
        """Obtain a matcher for the narrowspec."""

    def setnarrowpats(newincludes, newexcludes):
        """Define the narrowspec for this repository."""

    def __getitem__(changeid):
        """Try to resolve a changectx."""

    def __contains__(changeid):
        """Whether a changeset exists."""

    def __nonzero__():
        """Always returns True."""
        return True

    __bool__ = __nonzero__

    def __len__():
        """Returns the number of changesets in the repo."""

    def __iter__():
        """Iterate over revisions in the changelog."""

    def revs(expr, *args):
        """Evaluate a revset.

        Emits revisions.
        """

    def set(expr, *args):
        """Evaluate a revset.

        Emits changectx instances.
        """

    def anyrevs(specs, user=False, localalias=None):
        """Find revisions matching one of the given revsets."""

    def url():
        """Returns a string representing the location of this repo."""

    def hook(name, throw=False, **args):
        """Call a hook."""

    def tags():
        """Return a mapping of tag to node."""

    def tagtype(tagname):
        """Return the type of a given tag."""

    def tagslist():
        """Return a list of tags ordered by revision."""

    def nodetags(node):
        """Return the tags associated with a node."""

    def nodebookmarks(node):
        """Return the list of bookmarks pointing to the specified node."""

    def branchmap():
        """Return a mapping of branch to heads in that branch."""

    def revbranchcache():
        pass

    def register_changeset(rev, changelogrevision):
        """Extension point for caches for new nodes.

        Multiple consumers are expected to need parts of the changelogrevision,
        so it is provided as an optimization to avoid duplicate lookups. A
        simple cache would be fragile when other revisions are accessed, too."""
        pass

    def branchtip(branch, ignoremissing=False):
        """Return the tip node for a given branch."""

    def lookup(key):
        """Resolve the node for a revision."""

    def lookupbranch(key):
        """Look up the branch name of the given revision or branch name."""

    def known(nodes):
        """Determine whether a series of nodes is known.

        Returns a list of bools.
        """

    def local():
        """Whether the repository is local."""
        return True

    def publishing():
        """Whether the repository is a publishing repository."""

    def cancopy():
        pass

    def shared():
        """The type of shared repository or None."""

    def wjoin(f, *insidef):
        """Calls self.vfs.reljoin(self.root, f, *insidef)"""

    def setparents(p1, p2):
        """Set the parent nodes of the working directory."""

    def filectx(path, changeid=None, fileid=None):
        """Obtain a filectx for the given file revision."""

    def getcwd():
        """Obtain the current working directory from the dirstate."""

    def pathto(f, cwd=None):
        """Obtain the relative path to a file."""

    def adddatafilter(name, fltr):
        pass

    def wread(filename):
        """Read a file from wvfs, using data filters."""

    def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
        """Write data to a file in the wvfs, using data filters."""

    def wwritedata(filename, data):
        """Resolve data for writing to the wvfs, using data filters."""

    def currenttransaction():
        """Obtain the current transaction instance or None."""

    def transaction(desc, report=None):
        """Open a new transaction to write to the repository."""
1779
1779
1780 def undofiles():
1780 def undofiles():
1781 """Returns a list of (vfs, path) for files to undo transactions."""
1781 """Returns a list of (vfs, path) for files to undo transactions."""
1782
1782
1783 def recover():
1783 def recover():
1784 """Roll back an interrupted transaction."""
1784 """Roll back an interrupted transaction."""
1785
1785
1786 def rollback(dryrun=False, force=False):
1786 def rollback(dryrun=False, force=False):
1787 """Undo the last transaction.
1787 """Undo the last transaction.
1788
1788
1789 DANGEROUS.
1789 DANGEROUS.
1790 """
1790 """
1791
1791
1792 def updatecaches(tr=None, full=False, caches=None):
1792 def updatecaches(tr=None, full=False, caches=None):
1793 """Warm repo caches."""
1793 """Warm repo caches."""
1794
1794
1795 def invalidatecaches():
1795 def invalidatecaches():
1796 """Invalidate cached data due to the repository mutating."""
1796 """Invalidate cached data due to the repository mutating."""
1797
1797
1798 def invalidatevolatilesets():
1798 def invalidatevolatilesets():
1799 pass
1799 pass
1800
1800
1801 def invalidatedirstate():
1801 def invalidatedirstate():
1802 """Invalidate the dirstate."""
1802 """Invalidate the dirstate."""
1803
1803
1804 def invalidate(clearfilecache=False):
1804 def invalidate(clearfilecache=False):
1805 pass
1805 pass
1806
1806
1807 def invalidateall():
1807 def invalidateall():
1808 pass
1808 pass
1809
1809
1810 def lock(wait=True):
1810 def lock(wait=True):
1811 """Lock the repository store and return a lock instance."""
1811 """Lock the repository store and return a lock instance."""
1812
1812
1813 def currentlock():
1814 """Return the lock if it's held or None."""
1815
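For illustration, a minimal sketch of how a caller might use ``currentlock()`` together with ``lock()`` (the ``_do_write`` helper is hypothetical and not part of this interface):

    def write_with_lock(repo):
        # Reuse a store lock the caller already holds; otherwise acquire
        # one for the duration of the write (a sketch, assuming `repo`
        # implements the interface above).
        if repo.currentlock() is not None:
            _do_write(repo)  # hypothetical helper; lock held upstream
        else:
            l = repo.lock()
            try:
                _do_write(repo)
            finally:
                l.release()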
1813 def wlock(wait=True):
1816 def wlock(wait=True):
1814 """Lock the non-store parts of the repository."""
1817 """Lock the non-store parts of the repository."""
1815
1818
1816 def currentwlock():
1819 def currentwlock():
1817 """Return the wlock if it's held or None."""
1820 """Return the wlock if it's held or None."""
1818
1821
1819 def checkcommitpatterns(wctx, match, status, fail):
1822 def checkcommitpatterns(wctx, match, status, fail):
1820 pass
1823 pass
1821
1824
1822 def commit(
1825 def commit(
1823 text=b'',
1826 text=b'',
1824 user=None,
1827 user=None,
1825 date=None,
1828 date=None,
1826 match=None,
1829 match=None,
1827 force=False,
1830 force=False,
1828 editor=False,
1831 editor=False,
1829 extra=None,
1832 extra=None,
1830 ):
1833 ):
1831 """Add a new revision to the repository."""
1834 """Add a new revision to the repository."""
1832
1835
1833 def commitctx(ctx, error=False, origctx=None):
1836 def commitctx(ctx, error=False, origctx=None):
1834 """Commit a commitctx instance to the repository."""
1837 """Commit a commitctx instance to the repository."""
1835
1838
1836 def destroying():
1839 def destroying():
1837 """Inform the repository that nodes are about to be destroyed."""
1840 """Inform the repository that nodes are about to be destroyed."""
1838
1841
1839 def destroyed():
1842 def destroyed():
1840 """Inform the repository that nodes have been destroyed."""
1843 """Inform the repository that nodes have been destroyed."""
1841
1844
1842 def status(
1845 def status(
1843 node1=b'.',
1846 node1=b'.',
1844 node2=None,
1847 node2=None,
1845 match=None,
1848 match=None,
1846 ignored=False,
1849 ignored=False,
1847 clean=False,
1850 clean=False,
1848 unknown=False,
1851 unknown=False,
1849 listsubrepos=False,
1852 listsubrepos=False,
1850 ):
1853 ):
1851 """Convenience method to call repo[x].status()."""
1854 """Convenience method to call repo[x].status()."""
1852
1855
1853 def addpostdsstatus(ps):
1856 def addpostdsstatus(ps):
1854 pass
1857 pass
1855
1858
1856 def postdsstatus():
1859 def postdsstatus():
1857 pass
1860 pass
1858
1861
1859 def clearpostdsstatus():
1862 def clearpostdsstatus():
1860 pass
1863 pass
1861
1864
1862 def heads(start=None):
1865 def heads(start=None):
1863 """Obtain list of nodes that are DAG heads."""
1866 """Obtain list of nodes that are DAG heads."""
1864
1867
1865 def branchheads(branch=None, start=None, closed=False):
1868 def branchheads(branch=None, start=None, closed=False):
1866 pass
1869 pass
1867
1870
1868 def branches(nodes):
1871 def branches(nodes):
1869 pass
1872 pass
1870
1873
1871 def between(pairs):
1874 def between(pairs):
1872 pass
1875 pass
1873
1876
1874 def checkpush(pushop):
1877 def checkpush(pushop):
1875 pass
1878 pass
1876
1879
1877 prepushoutgoinghooks = interfaceutil.Attribute("""util.hooks instance.""")
1880 prepushoutgoinghooks = interfaceutil.Attribute("""util.hooks instance.""")
1878
1881
1879 def pushkey(namespace, key, old, new):
1882 def pushkey(namespace, key, old, new):
1880 pass
1883 pass
1881
1884
1882 def listkeys(namespace):
1885 def listkeys(namespace):
1883 pass
1886 pass
1884
1887
1885 def debugwireargs(one, two, three=None, four=None, five=None):
1888 def debugwireargs(one, two, three=None, four=None, five=None):
1886 pass
1889 pass
1887
1890
1888 def savecommitmessage(text):
1891 def savecommitmessage(text):
1889 pass
1892 pass
1890
1893
1891 def register_sidedata_computer(
1894 def register_sidedata_computer(
1892 kind, category, keys, computer, flags, replace=False
1895 kind, category, keys, computer, flags, replace=False
1893 ):
1896 ):
1894 pass
1897 pass
1895
1898
1896 def register_wanted_sidedata(category):
1899 def register_wanted_sidedata(category):
1897 pass
1900 pass
1898
1901
1899
1902
1900 class completelocalrepository(
1903 class completelocalrepository(
1901 ilocalrepositorymain, ilocalrepositoryfilestorage
1904 ilocalrepositorymain, ilocalrepositoryfilestorage
1902 ):
1905 ):
1903 """Complete interface for a local repository."""
1906 """Complete interface for a local repository."""
1904
1907
1905
1908
1906 class iwireprotocolcommandcacher(interfaceutil.Interface):
1909 class iwireprotocolcommandcacher(interfaceutil.Interface):
1907 """Represents a caching backend for wire protocol commands.
1910 """Represents a caching backend for wire protocol commands.
1908
1911
1909 Wire protocol version 2 supports transparent caching of many commands.
1912 Wire protocol version 2 supports transparent caching of many commands.
1910 To leverage this caching, servers can activate objects that cache
1913 To leverage this caching, servers can activate objects that cache
1911 command responses. Objects handle both cache writing and reading.
1914 command responses. Objects handle both cache writing and reading.
1912 This interface defines how that response caching mechanism works.
1915 This interface defines how that response caching mechanism works.
1913
1916
1914 Wire protocol version 2 commands emit a series of objects that are
1917 Wire protocol version 2 commands emit a series of objects that are
1915 serialized and sent to the client. The caching layer exists between
1918 serialized and sent to the client. The caching layer exists between
1916 the invocation of the command function and the sending of its output
1919 the invocation of the command function and the sending of its output
1917 objects to an output layer.
1920 objects to an output layer.
1918
1921
1919 Instances of this interface represent a binding to a cache that
1922 Instances of this interface represent a binding to a cache that
1920 can serve a response (in place of calling a command function) and/or
1923 can serve a response (in place of calling a command function) and/or
1921 write responses to a cache for subsequent use.
1924 write responses to a cache for subsequent use.
1922
1925
1923 When a command request arrives, the following happens with regard
1926 When a command request arrives, the following happens with regard
1924 to this interface:
1927 to this interface:
1925
1928
1926 1. The server determines whether the command request is cacheable.
1929 1. The server determines whether the command request is cacheable.
1927 2. If it is, an instance of this interface is spawned.
1930 2. If it is, an instance of this interface is spawned.
1928 3. The cacher is activated in a context manager (``__enter__`` is called).
1931 3. The cacher is activated in a context manager (``__enter__`` is called).
1929 4. A cache *key* for that request is derived. This will call the
1932 4. A cache *key* for that request is derived. This will call the
1930 instance's ``adjustcachekeystate()`` method so the derivation
1933 instance's ``adjustcachekeystate()`` method so the derivation
1931 can be influenced.
1934 can be influenced.
1932 5. The cacher is informed of the derived cache key via a call to
1935 5. The cacher is informed of the derived cache key via a call to
1933 ``setcachekey()``.
1936 ``setcachekey()``.
1934 6. The cacher's ``lookup()`` method is called to test for presence of
1937 6. The cacher's ``lookup()`` method is called to test for presence of
1935 the derived key in the cache.
1938 the derived key in the cache.
1936 7. If ``lookup()`` returns a hit, that cached result is used in place
1939 7. If ``lookup()`` returns a hit, that cached result is used in place
1937 of invoking the command function. ``__exit__`` is called and the instance
1940 of invoking the command function. ``__exit__`` is called and the instance
1938 is discarded.
1941 is discarded.
1939 8. The command function is invoked.
1942 8. The command function is invoked.
1940 9. ``onobject()`` is called for each object emitted by the command
1943 9. ``onobject()`` is called for each object emitted by the command
1941 function.
1944 function.
1942 10. After the final object is seen, ``onfinished()`` is called.
1945 10. After the final object is seen, ``onfinished()`` is called.
1943 11. ``__exit__`` is called to signal the end of use of the instance.
1946 11. ``__exit__`` is called to signal the end of use of the instance.
1944
1947
1945 Cache *key* derivation can be influenced by the instance.
1948 Cache *key* derivation can be influenced by the instance.
1946
1949
1947 Cache keys are initially derived from a deterministic representation of
1950 Cache keys are initially derived from a deterministic representation of
1948 the command request. This includes the command name, arguments, protocol
1951 the command request. This includes the command name, arguments, protocol
1949 version, etc. This initial key derivation is performed by CBOR-encoding a
1952 version, etc. This initial key derivation is performed by CBOR-encoding a
1950 data structure and feeding that output into a hasher.
1953 data structure and feeding that output into a hasher.
1951
1954
1952 Instances of this interface can influence this initial key derivation
1955 Instances of this interface can influence this initial key derivation
1953 via ``adjustcachekeystate()``.
1956 via ``adjustcachekeystate()``.
1954
1957
1955 The instance is informed of the derived cache key via a call to
1958 The instance is informed of the derived cache key via a call to
1956 ``setcachekey()``. The instance must store the key locally so it can
1959 ``setcachekey()``. The instance must store the key locally so it can
1957 be consulted on subsequent operations that may require it.
1960 be consulted on subsequent operations that may require it.
1958
1961
1959 When constructed, the instance has access to a callable that can be used
1962 When constructed, the instance has access to a callable that can be used
1960 for encoding response objects. This callable receives as its single
1963 for encoding response objects. This callable receives as its single
1961 argument an object emitted by a command function. It returns an iterable
1964 argument an object emitted by a command function. It returns an iterable
1962 of bytes chunks representing the encoded object. Unless the cacher is
1965 of bytes chunks representing the encoded object. Unless the cacher is
1963 caching native Python objects in memory or has a way of reconstructing
1966 caching native Python objects in memory or has a way of reconstructing
1964 the original Python objects, implementations typically call this function
1967 the original Python objects, implementations typically call this function
1965 to produce bytes from the output objects and then store those bytes in
1968 to produce bytes from the output objects and then store those bytes in
1966 the cache. When it comes time to re-emit those bytes, they are wrapped
1969 the cache. When it comes time to re-emit those bytes, they are wrapped
1967 in a ``wireprototypes.encodedresponse`` instance to tell the output
1970 in a ``wireprototypes.encodedresponse`` instance to tell the output
1968 layer that they are pre-encoded.
1971 layer that they are pre-encoded.
1969
1972
1970 When receiving the objects emitted by the command function, instances
1973 When receiving the objects emitted by the command function, instances
1971 can choose what to do with those objects. The simplest thing to do is
1974 can choose what to do with those objects. The simplest thing to do is
1972 re-emit the original objects. They will be forwarded to the output
1975 re-emit the original objects. They will be forwarded to the output
1973 layer and will be processed as if the cacher did not exist.
1976 layer and will be processed as if the cacher did not exist.
1974
1977
1975 Implementations could also choose to not emit objects - instead locally
1978 Implementations could also choose to not emit objects - instead locally
1976 buffering objects or their encoded representation. They could then emit
1979 buffering objects or their encoded representation. They could then emit
1977 a single "coalesced" object when ``onfinished()`` is called. In
1980 a single "coalesced" object when ``onfinished()`` is called. In
1978 this way, the implementation would function as a filtering layer of
1981 this way, the implementation would function as a filtering layer of
1979 sorts.
1982 sorts.
1980
1983
1981 When caching objects, typically the encoded form of the object will
1984 When caching objects, typically the encoded form of the object will
1982 be stored. Keep in mind that if the original object is forwarded to
1985 be stored. Keep in mind that if the original object is forwarded to
1983 the output layer, it will need to be encoded there as well. For large
1986 the output layer, it will need to be encoded there as well. For large
1984 output, this redundant encoding could add overhead. Implementations
1987 output, this redundant encoding could add overhead. Implementations
1985 could wrap the encoded object data in ``wireprototypes.encodedresponse``
1988 could wrap the encoded object data in ``wireprototypes.encodedresponse``
1986 instances to avoid this overhead.
1989 instances to avoid this overhead.
1987 """
1990 """
1988
1991
1989 def __enter__():
1992 def __enter__():
1990 """Marks the instance as active.
1993 """Marks the instance as active.
1991
1994
1992 Should return self.
1995 Should return self.
1993 """
1996 """
1994
1997
1995 def __exit__(exctype, excvalue, exctb):
1998 def __exit__(exctype, excvalue, exctb):
1996 """Called when cacher is no longer used.
1999 """Called when cacher is no longer used.
1997
2000
1998 This can be used by implementations to perform cleanup actions (e.g.
2001 This can be used by implementations to perform cleanup actions (e.g.
1999 disconnecting network sockets, aborting a partially cached response).
2002 disconnecting network sockets, aborting a partially cached response).
2000 """
2003 """
2001
2004
2002 def adjustcachekeystate(state):
2005 def adjustcachekeystate(state):
2003 """Influences cache key derivation by adjusting state to derive key.
2006 """Influences cache key derivation by adjusting state to derive key.
2004
2007
2005 A dict defining the state used to derive the cache key is passed.
2008 A dict defining the state used to derive the cache key is passed.
2006
2009
2007 Implementations can modify this dict to record additional state they
2010 Implementations can modify this dict to record additional state they
2008 want to influence key derivation.
2011 want to influence key derivation.
2009
2012
2010 Implementations are *highly* encouraged to not modify or delete
2013 Implementations are *highly* encouraged to not modify or delete
2011 existing keys.
2014 existing keys.
2012 """
2015 """
2013
2016
2014 def setcachekey(key):
2017 def setcachekey(key):
2015 """Record the derived cache key for this request.
2018 """Record the derived cache key for this request.
2016
2019
2017 Instances may mutate the key for internal usage, as desired. e.g.
2020 Instances may mutate the key for internal usage, as desired. e.g.
2018 instances may wish to prepend the repo name, introduce path
2021 instances may wish to prepend the repo name, introduce path
2019 components for filesystem or URL addressing, etc. Behavior is up to
2022 components for filesystem or URL addressing, etc. Behavior is up to
2020 the cache.
2023 the cache.
2021
2024
2022 Returns a bool indicating if the request is cacheable by this
2025 Returns a bool indicating if the request is cacheable by this
2023 instance.
2026 instance.
2024 """
2027 """
2025
2028
2026 def lookup():
2029 def lookup():
2027 """Attempt to resolve an entry in the cache.
2030 """Attempt to resolve an entry in the cache.
2028
2031
2029 The instance is instructed to look for the cache key that it was
2032 The instance is instructed to look for the cache key that it was
2030 informed about via the call to ``setcachekey()``.
2033 informed about via the call to ``setcachekey()``.
2031
2034
2032 If there's no cache hit or the cacher doesn't wish to use the cached
2035 If there's no cache hit or the cacher doesn't wish to use the cached
2033 entry, ``None`` should be returned.
2036 entry, ``None`` should be returned.
2034
2037
2035 Else, a dict defining the cached result should be returned. The
2038 Else, a dict defining the cached result should be returned. The
2036 dict may have the following keys:
2039 dict may have the following keys:
2037
2040
2038 objs
2041 objs
2039 An iterable of objects that should be sent to the client. That
2042 An iterable of objects that should be sent to the client. That
2040 iterable of objects is expected to be what the command function
2043 iterable of objects is expected to be what the command function
2041 would return if invoked or an equivalent representation thereof.
2044 would return if invoked or an equivalent representation thereof.
2042 """
2045 """
2043
2046
2044 def onobject(obj):
2047 def onobject(obj):
2045 """Called when a new object is emitted from the command function.
2048 """Called when a new object is emitted from the command function.
2046
2049
2047 Receives as its argument the object that was emitted from the
2050 Receives as its argument the object that was emitted from the
2048 command function.
2051 command function.
2049
2052
2050 This method returns an iterator of objects to forward to the output
2053 This method returns an iterator of objects to forward to the output
2051 layer. The easiest implementation is a generator that just
2054 layer. The easiest implementation is a generator that just
2052 ``yield obj``.
2055 ``yield obj``.
2053 """
2056 """
2054
2057
2055 def onfinished():
2058 def onfinished():
2056 """Called after all objects have been emitted from the command function.
2059 """Called after all objects have been emitted from the command function.
2057
2060
2058 Implementations should return an iterator of objects to forward to
2061 Implementations should return an iterator of objects to forward to
2059 the output layer.
2062 the output layer.
2060
2063
2061 This method can be a generator.
2064 This method can be a generator.
2062 """
2065 """
@@ -1,3995 +1,3999 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 # coding: utf-8
2 # coding: utf-8
3 #
3 #
4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9
9
10 import functools
10 import functools
11 import os
11 import os
12 import random
12 import random
13 import re
13 import re
14 import sys
14 import sys
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from concurrent import futures
18 from concurrent import futures
19 from typing import (
19 from typing import (
20 Optional,
20 Optional,
21 )
21 )
22
22
23 from .i18n import _
23 from .i18n import _
24 from .node import (
24 from .node import (
25 bin,
25 bin,
26 hex,
26 hex,
27 nullrev,
27 nullrev,
28 sha1nodeconstants,
28 sha1nodeconstants,
29 short,
29 short,
30 )
30 )
31 from .pycompat import (
31 from .pycompat import (
32 delattr,
32 delattr,
33 getattr,
33 getattr,
34 )
34 )
35 from . import (
35 from . import (
36 bookmarks,
36 bookmarks,
37 branchmap,
37 branchmap,
38 bundle2,
38 bundle2,
39 bundlecaches,
39 bundlecaches,
40 changegroup,
40 changegroup,
41 color,
41 color,
42 commit,
42 commit,
43 context,
43 context,
44 dirstate,
44 dirstate,
45 discovery,
45 discovery,
46 encoding,
46 encoding,
47 error,
47 error,
48 exchange,
48 exchange,
49 extensions,
49 extensions,
50 filelog,
50 filelog,
51 hook,
51 hook,
52 lock as lockmod,
52 lock as lockmod,
53 match as matchmod,
53 match as matchmod,
54 mergestate as mergestatemod,
54 mergestate as mergestatemod,
55 mergeutil,
55 mergeutil,
56 namespaces,
56 namespaces,
57 narrowspec,
57 narrowspec,
58 obsolete,
58 obsolete,
59 pathutil,
59 pathutil,
60 phases,
60 phases,
61 pushkey,
61 pushkey,
62 pycompat,
62 pycompat,
63 rcutil,
63 rcutil,
64 repoview,
64 repoview,
65 requirements as requirementsmod,
65 requirements as requirementsmod,
66 revlog,
66 revlog,
67 revset,
67 revset,
68 revsetlang,
68 revsetlang,
69 scmutil,
69 scmutil,
70 sparse,
70 sparse,
71 store as storemod,
71 store as storemod,
72 subrepoutil,
72 subrepoutil,
73 tags as tagsmod,
73 tags as tagsmod,
74 transaction,
74 transaction,
75 txnutil,
75 txnutil,
76 util,
76 util,
77 vfs as vfsmod,
77 vfs as vfsmod,
78 wireprototypes,
78 wireprototypes,
79 )
79 )
80
80
81 from .interfaces import (
81 from .interfaces import (
82 repository,
82 repository,
83 util as interfaceutil,
83 util as interfaceutil,
84 )
84 )
85
85
86 from .utils import (
86 from .utils import (
87 hashutil,
87 hashutil,
88 procutil,
88 procutil,
89 stringutil,
89 stringutil,
90 urlutil,
90 urlutil,
91 )
91 )
92
92
93 from .revlogutils import (
93 from .revlogutils import (
94 concurrency_checker as revlogchecker,
94 concurrency_checker as revlogchecker,
95 constants as revlogconst,
95 constants as revlogconst,
96 sidedata as sidedatamod,
96 sidedata as sidedatamod,
97 )
97 )
98
98
99 release = lockmod.release
99 release = lockmod.release
100 urlerr = util.urlerr
100 urlerr = util.urlerr
101 urlreq = util.urlreq
101 urlreq = util.urlreq
102
102
103 RE_SKIP_DIRSTATE_ROLLBACK = re.compile(
103 RE_SKIP_DIRSTATE_ROLLBACK = re.compile(
104 b"^((dirstate|narrowspec.dirstate).*|branch$)"
104 b"^((dirstate|narrowspec.dirstate).*|branch$)"
105 )
105 )
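As a hedged illustration (the sample file names are made up), this pattern matches any name starting with ``dirstate`` or ``narrowspec.dirstate`` and the exact name ``branch``; note that the unescaped ``.`` in ``narrowspec.dirstate`` is a regex wildcard:

    import re

    pat = re.compile(b"^((dirstate|narrowspec.dirstate).*|branch$)")
    assert pat.match(b"dirstate")
    assert pat.match(b"dirstate.backup")      # any suffix is accepted
    assert pat.match(b"narrowspec.dirstate")
    assert pat.match(b"branch")
    assert not pat.match(b"branch2")          # 'branch' must match exactly
    assert not pat.match(b"requires")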
106
106
107 # set of (path, vfs-location) tuples. vfs-location is:
107 # set of (path, vfs-location) tuples. vfs-location is:
108 # - 'plain' for vfs relative paths
108 # - 'plain' for vfs relative paths
109 # - '' for svfs relative paths
109 # - '' for svfs relative paths
110 _cachedfiles = set()
110 _cachedfiles = set()
111
111
112
112
113 class _basefilecache(scmutil.filecache):
113 class _basefilecache(scmutil.filecache):
114 """All filecache usage on repo are done for logic that should be unfiltered"""
114 """All filecache usage on repo are done for logic that should be unfiltered"""
115
115
116 def __get__(self, repo, type=None):
116 def __get__(self, repo, type=None):
117 if repo is None:
117 if repo is None:
118 return self
118 return self
119 # proxy to unfiltered __dict__ since filtered repo has no entry
119 # proxy to unfiltered __dict__ since filtered repo has no entry
120 unfi = repo.unfiltered()
120 unfi = repo.unfiltered()
121 try:
121 try:
122 return unfi.__dict__[self.sname]
122 return unfi.__dict__[self.sname]
123 except KeyError:
123 except KeyError:
124 pass
124 pass
125 return super(_basefilecache, self).__get__(unfi, type)
125 return super(_basefilecache, self).__get__(unfi, type)
126
126
127 def set(self, repo, value):
127 def set(self, repo, value):
128 return super(_basefilecache, self).set(repo.unfiltered(), value)
128 return super(_basefilecache, self).set(repo.unfiltered(), value)
129
129
130
130
131 class repofilecache(_basefilecache):
131 class repofilecache(_basefilecache):
132 """filecache for files in .hg but outside of .hg/store"""
132 """filecache for files in .hg but outside of .hg/store"""
133
133
134 def __init__(self, *paths):
134 def __init__(self, *paths):
135 super(repofilecache, self).__init__(*paths)
135 super(repofilecache, self).__init__(*paths)
136 for path in paths:
136 for path in paths:
137 _cachedfiles.add((path, b'plain'))
137 _cachedfiles.add((path, b'plain'))
138
138
139 def join(self, obj, fname):
139 def join(self, obj, fname):
140 return obj.vfs.join(fname)
140 return obj.vfs.join(fname)
141
141
142
142
143 class storecache(_basefilecache):
143 class storecache(_basefilecache):
144 """filecache for files in the store"""
144 """filecache for files in the store"""
145
145
146 def __init__(self, *paths):
146 def __init__(self, *paths):
147 super(storecache, self).__init__(*paths)
147 super(storecache, self).__init__(*paths)
148 for path in paths:
148 for path in paths:
149 _cachedfiles.add((path, b''))
149 _cachedfiles.add((path, b''))
150
150
151 def join(self, obj, fname):
151 def join(self, obj, fname):
152 return obj.sjoin(fname)
152 return obj.sjoin(fname)
153
153
154
154
155 class changelogcache(storecache):
155 class changelogcache(storecache):
156 """filecache for the changelog"""
156 """filecache for the changelog"""
157
157
158 def __init__(self):
158 def __init__(self):
159 super(changelogcache, self).__init__()
159 super(changelogcache, self).__init__()
160 _cachedfiles.add((b'00changelog.i', b''))
160 _cachedfiles.add((b'00changelog.i', b''))
161 _cachedfiles.add((b'00changelog.n', b''))
161 _cachedfiles.add((b'00changelog.n', b''))
162
162
163 def tracked_paths(self, obj):
163 def tracked_paths(self, obj):
164 paths = [self.join(obj, b'00changelog.i')]
164 paths = [self.join(obj, b'00changelog.i')]
165 if obj.store.opener.options.get(b'persistent-nodemap', False):
165 if obj.store.opener.options.get(b'persistent-nodemap', False):
166 paths.append(self.join(obj, b'00changelog.n'))
166 paths.append(self.join(obj, b'00changelog.n'))
167 return paths
167 return paths
168
168
169
169
170 class manifestlogcache(storecache):
170 class manifestlogcache(storecache):
171 """filecache for the manifestlog"""
171 """filecache for the manifestlog"""
172
172
173 def __init__(self):
173 def __init__(self):
174 super(manifestlogcache, self).__init__()
174 super(manifestlogcache, self).__init__()
175 _cachedfiles.add((b'00manifest.i', b''))
175 _cachedfiles.add((b'00manifest.i', b''))
176 _cachedfiles.add((b'00manifest.n', b''))
176 _cachedfiles.add((b'00manifest.n', b''))
177
177
178 def tracked_paths(self, obj):
178 def tracked_paths(self, obj):
179 paths = [self.join(obj, b'00manifest.i')]
179 paths = [self.join(obj, b'00manifest.i')]
180 if obj.store.opener.options.get(b'persistent-nodemap', False):
180 if obj.store.opener.options.get(b'persistent-nodemap', False):
181 paths.append(self.join(obj, b'00manifest.n'))
181 paths.append(self.join(obj, b'00manifest.n'))
182 return paths
182 return paths
183
183
184
184
185 class mixedrepostorecache(_basefilecache):
185 class mixedrepostorecache(_basefilecache):
186 """filecache for a mix files in .hg/store and outside"""
186 """filecache for a mix files in .hg/store and outside"""
187
187
188 def __init__(self, *pathsandlocations):
188 def __init__(self, *pathsandlocations):
189 # scmutil.filecache only uses the path for passing back into our
189 # scmutil.filecache only uses the path for passing back into our
190 # join(), so we can safely pass a list of paths and locations
190 # join(), so we can safely pass a list of paths and locations
191 super(mixedrepostorecache, self).__init__(*pathsandlocations)
191 super(mixedrepostorecache, self).__init__(*pathsandlocations)
192 _cachedfiles.update(pathsandlocations)
192 _cachedfiles.update(pathsandlocations)
193
193
194 def join(self, obj, fnameandlocation):
194 def join(self, obj, fnameandlocation):
195 fname, location = fnameandlocation
195 fname, location = fnameandlocation
196 if location == b'plain':
196 if location == b'plain':
197 return obj.vfs.join(fname)
197 return obj.vfs.join(fname)
198 else:
198 else:
199 if location != b'':
199 if location != b'':
200 raise error.ProgrammingError(
200 raise error.ProgrammingError(
201 b'unexpected location: %s' % location
201 b'unexpected location: %s' % location
202 )
202 )
203 return obj.sjoin(fname)
203 return obj.sjoin(fname)
204
204
205
205
206 def isfilecached(repo, name):
206 def isfilecached(repo, name):
207 """check if a repo has already cached "name" filecache-ed property
207 """check if a repo has already cached "name" filecache-ed property
208
208
209 This returns (cachedobj-or-None, iscached) tuple.
209 This returns (cachedobj-or-None, iscached) tuple.
210 """
210 """
211 cacheentry = repo.unfiltered()._filecache.get(name, None)
211 cacheentry = repo.unfiltered()._filecache.get(name, None)
212 if not cacheentry:
212 if not cacheentry:
213 return None, False
213 return None, False
214 return cacheentry.obj, True
214 return cacheentry.obj, True
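A hedged usage sketch (the repo object and the exact spelling of the property name are assumptions for illustration):

    obj, cached = isfilecached(repo, b'changelog')
    if cached:
        # reuse the already-loaded object without re-validating the
        # underlying file
        cl = obj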
215
215
216
216
217 class unfilteredpropertycache(util.propertycache):
217 class unfilteredpropertycache(util.propertycache):
218 """propertycache that apply to unfiltered repo only"""
218 """propertycache that apply to unfiltered repo only"""
219
219
220 def __get__(self, repo, type=None):
220 def __get__(self, repo, type=None):
221 unfi = repo.unfiltered()
221 unfi = repo.unfiltered()
222 if unfi is repo:
222 if unfi is repo:
223 return super(unfilteredpropertycache, self).__get__(unfi)
223 return super(unfilteredpropertycache, self).__get__(unfi)
224 return getattr(unfi, self.name)
224 return getattr(unfi, self.name)
225
225
226
226
227 class filteredpropertycache(util.propertycache):
227 class filteredpropertycache(util.propertycache):
228 """propertycache that must take filtering in account"""
228 """propertycache that must take filtering in account"""
229
229
230 def cachevalue(self, obj, value):
230 def cachevalue(self, obj, value):
231 object.__setattr__(obj, self.name, value)
231 object.__setattr__(obj, self.name, value)
232
232
233
233
234 def hasunfilteredcache(repo, name):
234 def hasunfilteredcache(repo, name):
235 """check if a repo has an unfilteredpropertycache value for <name>"""
235 """check if a repo has an unfilteredpropertycache value for <name>"""
236 return name in vars(repo.unfiltered())
236 return name in vars(repo.unfiltered())
237
237
238
238
239 def unfilteredmethod(orig):
239 def unfilteredmethod(orig):
240 """decorate method that always need to be run on unfiltered version"""
240 """decorate method that always need to be run on unfiltered version"""
241
241
242 @functools.wraps(orig)
242 @functools.wraps(orig)
243 def wrapper(repo, *args, **kwargs):
243 def wrapper(repo, *args, **kwargs):
244 return orig(repo.unfiltered(), *args, **kwargs)
244 return orig(repo.unfiltered(), *args, **kwargs)
245
245
246 return wrapper
246 return wrapper
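For illustration, a hedged sketch of applying this decorator (the class and method are hypothetical):

    class examplerepo:
        def unfiltered(self):
            return self  # a real repo returns its unfiltered variant

        @unfilteredmethod
        def destroy_example(self, *args):
            # by the time this body runs, ``self`` is the unfiltered repo
            pass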
247
247
248
248
249 moderncaps = {
249 moderncaps = {
250 b'lookup',
250 b'lookup',
251 b'branchmap',
251 b'branchmap',
252 b'pushkey',
252 b'pushkey',
253 b'known',
253 b'known',
254 b'getbundle',
254 b'getbundle',
255 b'unbundle',
255 b'unbundle',
256 }
256 }
257 legacycaps = moderncaps.union({b'changegroupsubset'})
257 legacycaps = moderncaps.union({b'changegroupsubset'})
258
258
259
259
260 @interfaceutil.implementer(repository.ipeercommandexecutor)
260 @interfaceutil.implementer(repository.ipeercommandexecutor)
261 class localcommandexecutor:
261 class localcommandexecutor:
262 def __init__(self, peer):
262 def __init__(self, peer):
263 self._peer = peer
263 self._peer = peer
264 self._sent = False
264 self._sent = False
265 self._closed = False
265 self._closed = False
266
266
267 def __enter__(self):
267 def __enter__(self):
268 return self
268 return self
269
269
270 def __exit__(self, exctype, excvalue, exctb):
270 def __exit__(self, exctype, excvalue, exctb):
271 self.close()
271 self.close()
272
272
273 def callcommand(self, command, args):
273 def callcommand(self, command, args):
274 if self._sent:
274 if self._sent:
275 raise error.ProgrammingError(
275 raise error.ProgrammingError(
276 b'callcommand() cannot be used after sendcommands()'
276 b'callcommand() cannot be used after sendcommands()'
277 )
277 )
278
278
279 if self._closed:
279 if self._closed:
280 raise error.ProgrammingError(
280 raise error.ProgrammingError(
281 b'callcommand() cannot be used after close()'
281 b'callcommand() cannot be used after close()'
282 )
282 )
283
283
284 # We don't need to support anything fancy. Just call the named
284 # We don't need to support anything fancy. Just call the named
285 # method on the peer and return a resolved future.
285 # method on the peer and return a resolved future.
286 fn = getattr(self._peer, pycompat.sysstr(command))
286 fn = getattr(self._peer, pycompat.sysstr(command))
287
287
288 f = futures.Future()
288 f = futures.Future()
289
289
290 try:
290 try:
291 result = fn(**pycompat.strkwargs(args))
291 result = fn(**pycompat.strkwargs(args))
292 except Exception:
292 except Exception:
293 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
293 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
294 else:
294 else:
295 f.set_result(result)
295 f.set_result(result)
296
296
297 return f
297 return f
298
298
299 def sendcommands(self):
299 def sendcommands(self):
300 self._sent = True
300 self._sent = True
301
301
302 def close(self):
302 def close(self):
303 self._closed = True
303 self._closed = True
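A hedged usage sketch of this executor (obtaining ``peer`` is assumed; in practice callers go through ``peer.commandexecutor()`` rather than constructing this class directly):

    with localcommandexecutor(peer) as e:
        f = e.callcommand(b'heads', {})
        e.sendcommands()    # a no-op here: the call already ran
        heads = f.result()  # the future was resolved synchronously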
304
304
305
305
306 @interfaceutil.implementer(repository.ipeercommands)
306 @interfaceutil.implementer(repository.ipeercommands)
307 class localpeer(repository.peer):
307 class localpeer(repository.peer):
308 '''peer for a local repo; reflects only the most recent API'''
308 '''peer for a local repo; reflects only the most recent API'''
309
309
310 def __init__(self, repo, caps=None, path=None):
310 def __init__(self, repo, caps=None, path=None):
311 super(localpeer, self).__init__(repo.ui, path=path)
311 super(localpeer, self).__init__(repo.ui, path=path)
312
312
313 if caps is None:
313 if caps is None:
314 caps = moderncaps.copy()
314 caps = moderncaps.copy()
315 self._repo = repo.filtered(b'served')
315 self._repo = repo.filtered(b'served')
316
316
317 if repo._wanted_sidedata:
317 if repo._wanted_sidedata:
318 formatted = bundle2.format_remote_wanted_sidedata(repo)
318 formatted = bundle2.format_remote_wanted_sidedata(repo)
319 caps.add(b'exp-wanted-sidedata=' + formatted)
319 caps.add(b'exp-wanted-sidedata=' + formatted)
320
320
321 self._caps = repo._restrictcapabilities(caps)
321 self._caps = repo._restrictcapabilities(caps)
322
322
323 # Begin of _basepeer interface.
323 # Begin of _basepeer interface.
324
324
325 def url(self):
325 def url(self):
326 return self._repo.url()
326 return self._repo.url()
327
327
328 def local(self):
328 def local(self):
329 return self._repo
329 return self._repo
330
330
331 def canpush(self):
331 def canpush(self):
332 return True
332 return True
333
333
334 def close(self):
334 def close(self):
335 self._repo.close()
335 self._repo.close()
336
336
337 # End of _basepeer interface.
337 # End of _basepeer interface.
338
338
339 # Begin of _basewirecommands interface.
339 # Begin of _basewirecommands interface.
340
340
341 def branchmap(self):
341 def branchmap(self):
342 return self._repo.branchmap()
342 return self._repo.branchmap()
343
343
344 def capabilities(self):
344 def capabilities(self):
345 return self._caps
345 return self._caps
346
346
347 def clonebundles(self):
347 def clonebundles(self):
348 return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)
348 return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)
349
349
350 def debugwireargs(self, one, two, three=None, four=None, five=None):
350 def debugwireargs(self, one, two, three=None, four=None, five=None):
351 """Used to test argument passing over the wire"""
351 """Used to test argument passing over the wire"""
352 return b"%s %s %s %s %s" % (
352 return b"%s %s %s %s %s" % (
353 one,
353 one,
354 two,
354 two,
355 pycompat.bytestr(three),
355 pycompat.bytestr(three),
356 pycompat.bytestr(four),
356 pycompat.bytestr(four),
357 pycompat.bytestr(five),
357 pycompat.bytestr(five),
358 )
358 )
359
359
360 def getbundle(
360 def getbundle(
361 self,
361 self,
362 source,
362 source,
363 heads=None,
363 heads=None,
364 common=None,
364 common=None,
365 bundlecaps=None,
365 bundlecaps=None,
366 remote_sidedata=None,
366 remote_sidedata=None,
367 **kwargs
367 **kwargs
368 ):
368 ):
369 chunks = exchange.getbundlechunks(
369 chunks = exchange.getbundlechunks(
370 self._repo,
370 self._repo,
371 source,
371 source,
372 heads=heads,
372 heads=heads,
373 common=common,
373 common=common,
374 bundlecaps=bundlecaps,
374 bundlecaps=bundlecaps,
375 remote_sidedata=remote_sidedata,
375 remote_sidedata=remote_sidedata,
376 **kwargs
376 **kwargs
377 )[1]
377 )[1]
378 cb = util.chunkbuffer(chunks)
378 cb = util.chunkbuffer(chunks)
379
379
380 if exchange.bundle2requested(bundlecaps):
380 if exchange.bundle2requested(bundlecaps):
381 # When requesting a bundle2, getbundle returns a stream to make the
381 # When requesting a bundle2, getbundle returns a stream to make the
382 # wire-level function happier. We need to build a proper object
382 # wire-level function happier. We need to build a proper object
383 # from it in the local peer.
383 # from it in the local peer.
384 return bundle2.getunbundler(self.ui, cb)
384 return bundle2.getunbundler(self.ui, cb)
385 else:
385 else:
386 return changegroup.getunbundler(b'01', cb, None)
386 return changegroup.getunbundler(b'01', cb, None)
387
387
388 def heads(self):
388 def heads(self):
389 return self._repo.heads()
389 return self._repo.heads()
390
390
391 def known(self, nodes):
391 def known(self, nodes):
392 return self._repo.known(nodes)
392 return self._repo.known(nodes)
393
393
394 def listkeys(self, namespace):
394 def listkeys(self, namespace):
395 return self._repo.listkeys(namespace)
395 return self._repo.listkeys(namespace)
396
396
397 def lookup(self, key):
397 def lookup(self, key):
398 return self._repo.lookup(key)
398 return self._repo.lookup(key)
399
399
400 def pushkey(self, namespace, key, old, new):
400 def pushkey(self, namespace, key, old, new):
401 return self._repo.pushkey(namespace, key, old, new)
401 return self._repo.pushkey(namespace, key, old, new)
402
402
403 def stream_out(self):
403 def stream_out(self):
404 raise error.Abort(_(b'cannot perform stream clone against local peer'))
404 raise error.Abort(_(b'cannot perform stream clone against local peer'))
405
405
406 def unbundle(self, bundle, heads, url):
406 def unbundle(self, bundle, heads, url):
407 """apply a bundle on a repo
407 """apply a bundle on a repo
408
408
409 This function handles the repo locking itself."""
409 This function handles the repo locking itself."""
410 try:
410 try:
411 try:
411 try:
412 bundle = exchange.readbundle(self.ui, bundle, None)
412 bundle = exchange.readbundle(self.ui, bundle, None)
413 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
413 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
414 if util.safehasattr(ret, b'getchunks'):
414 if util.safehasattr(ret, b'getchunks'):
415 # This is a bundle20 object, turn it into an unbundler.
415 # This is a bundle20 object, turn it into an unbundler.
416 # This little dance should be dropped eventually when the
416 # This little dance should be dropped eventually when the
417 # API is finally improved.
417 # API is finally improved.
418 stream = util.chunkbuffer(ret.getchunks())
418 stream = util.chunkbuffer(ret.getchunks())
419 ret = bundle2.getunbundler(self.ui, stream)
419 ret = bundle2.getunbundler(self.ui, stream)
420 return ret
420 return ret
421 except Exception as exc:
421 except Exception as exc:
422 # If the exception contains output salvaged from a bundle2
422 # If the exception contains output salvaged from a bundle2
423 # reply, we need to make sure it is printed before continuing
423 # reply, we need to make sure it is printed before continuing
424 # to fail. So we build a bundle2 with such output and consume
424 # to fail. So we build a bundle2 with such output and consume
425 # it directly.
425 # it directly.
426 #
426 #
427 # This is not very elegant but allows a "simple" solution for
427 # This is not very elegant but allows a "simple" solution for
428 # issue4594
428 # issue4594
429 output = getattr(exc, '_bundle2salvagedoutput', ())
429 output = getattr(exc, '_bundle2salvagedoutput', ())
430 if output:
430 if output:
431 bundler = bundle2.bundle20(self._repo.ui)
431 bundler = bundle2.bundle20(self._repo.ui)
432 for out in output:
432 for out in output:
433 bundler.addpart(out)
433 bundler.addpart(out)
434 stream = util.chunkbuffer(bundler.getchunks())
434 stream = util.chunkbuffer(bundler.getchunks())
435 b = bundle2.getunbundler(self.ui, stream)
435 b = bundle2.getunbundler(self.ui, stream)
436 bundle2.processbundle(self._repo, b)
436 bundle2.processbundle(self._repo, b)
437 raise
437 raise
438 except error.PushRaced as exc:
438 except error.PushRaced as exc:
439 raise error.ResponseError(
439 raise error.ResponseError(
440 _(b'push failed:'), stringutil.forcebytestr(exc)
440 _(b'push failed:'), stringutil.forcebytestr(exc)
441 )
441 )
442
442
443 # End of _basewirecommands interface.
443 # End of _basewirecommands interface.
444
444
445 # Begin of peer interface.
445 # Begin of peer interface.
446
446
447 def commandexecutor(self):
447 def commandexecutor(self):
448 return localcommandexecutor(self)
448 return localcommandexecutor(self)
449
449
450 # End of peer interface.
450 # End of peer interface.
451
451
452
452
453 @interfaceutil.implementer(repository.ipeerlegacycommands)
453 @interfaceutil.implementer(repository.ipeerlegacycommands)
454 class locallegacypeer(localpeer):
454 class locallegacypeer(localpeer):
455 """peer extension which implements legacy methods too; used for tests with
455 """peer extension which implements legacy methods too; used for tests with
456 restricted capabilities"""
456 restricted capabilities"""
457
457
458 def __init__(self, repo, path=None):
458 def __init__(self, repo, path=None):
459 super(locallegacypeer, self).__init__(repo, caps=legacycaps, path=path)
459 super(locallegacypeer, self).__init__(repo, caps=legacycaps, path=path)
460
460
461 # Begin of baselegacywirecommands interface.
461 # Begin of baselegacywirecommands interface.
462
462
463 def between(self, pairs):
463 def between(self, pairs):
464 return self._repo.between(pairs)
464 return self._repo.between(pairs)
465
465
466 def branches(self, nodes):
466 def branches(self, nodes):
467 return self._repo.branches(nodes)
467 return self._repo.branches(nodes)
468
468
469 def changegroup(self, nodes, source):
469 def changegroup(self, nodes, source):
470 outgoing = discovery.outgoing(
470 outgoing = discovery.outgoing(
471 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
471 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
472 )
472 )
473 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
473 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
474
474
475 def changegroupsubset(self, bases, heads, source):
475 def changegroupsubset(self, bases, heads, source):
476 outgoing = discovery.outgoing(
476 outgoing = discovery.outgoing(
477 self._repo, missingroots=bases, ancestorsof=heads
477 self._repo, missingroots=bases, ancestorsof=heads
478 )
478 )
479 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
479 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
480
480
481 # End of baselegacywirecommands interface.
481 # End of baselegacywirecommands interface.
482
482
483
483
484 # Functions receiving (ui, features) that extensions can register to impact
484 # Functions receiving (ui, features) that extensions can register to impact
485 # the ability to load repositories with custom requirements. Only
485 # the ability to load repositories with custom requirements. Only
486 # functions defined in loaded extensions are called.
486 # functions defined in loaded extensions are called.
487 #
487 #
488 # The function receives a set of requirement strings that the repository
488 # The function receives a set of requirement strings that the repository
489 # is capable of opening. Functions will typically add elements to the
489 # is capable of opening. Functions will typically add elements to the
490 # set to reflect that the extension knows how to handle those requirements.
490 # set to reflect that the extension knows how to handle those requirements.
491 featuresetupfuncs = set()
491 featuresetupfuncs = set()
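A hedged sketch of how an extension might register one of these functions (the module layout and feature name are hypothetical):

    from mercurial import localrepo

    def featuresetup(ui, features):
        # advertise that this extension can handle repos with this feature
        features.add(b'exp-myextension-feature')

    def uisetup(ui):
        localrepo.featuresetupfuncs.add(featuresetup)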
492
492
493
493
494 def _getsharedvfs(hgvfs, requirements):
494 def _getsharedvfs(hgvfs, requirements):
495 """returns the vfs object pointing to root of shared source
495 """returns the vfs object pointing to root of shared source
496 repo for a shared repository
496 repo for a shared repository
497
497
498 hgvfs is vfs pointing at .hg/ of current repo (shared one)
498 hgvfs is vfs pointing at .hg/ of current repo (shared one)
499 requirements is a set of requirements of current repo (shared one)
499 requirements is a set of requirements of current repo (shared one)
500 """
500 """
501 # The ``shared`` or ``relshared`` requirements indicate the
501 # The ``shared`` or ``relshared`` requirements indicate the
502 # store lives in the path contained in the ``.hg/sharedpath`` file.
502 # store lives in the path contained in the ``.hg/sharedpath`` file.
503 # This is an absolute path for ``shared`` and relative to
503 # This is an absolute path for ``shared`` and relative to
504 # ``.hg/`` for ``relshared``.
504 # ``.hg/`` for ``relshared``.
505 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
505 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
506 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
506 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
507 sharedpath = util.normpath(hgvfs.join(sharedpath))
507 sharedpath = util.normpath(hgvfs.join(sharedpath))
508
508
509 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
509 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
510
510
511 if not sharedvfs.exists():
511 if not sharedvfs.exists():
512 raise error.RepoError(
512 raise error.RepoError(
513 _(b'.hg/sharedpath points to nonexistent directory %s')
513 _(b'.hg/sharedpath points to nonexistent directory %s')
514 % sharedvfs.base
514 % sharedvfs.base
515 )
515 )
516 return sharedvfs
516 return sharedvfs
517
517
518
518
519 def _readrequires(vfs, allowmissing):
519 def _readrequires(vfs, allowmissing):
520 """reads the require file present at root of this vfs
520 """reads the require file present at root of this vfs
521 and return a set of requirements
521 and return a set of requirements
522
522
523 If allowmissing is True, we suppress FileNotFoundError if raised"""
523 If allowmissing is True, we suppress FileNotFoundError if raised"""
524 # requires file contains a newline-delimited list of
524 # requires file contains a newline-delimited list of
525 # features/capabilities the opener (us) must have in order to use
525 # features/capabilities the opener (us) must have in order to use
526 # the repository. This file was introduced in Mercurial 0.9.2,
526 # the repository. This file was introduced in Mercurial 0.9.2,
527 # which means very old repositories may not have one. We assume
527 # which means very old repositories may not have one. We assume
528 # a missing file translates to no requirements.
528 # a missing file translates to no requirements.
529 read = vfs.tryread if allowmissing else vfs.read
529 read = vfs.tryread if allowmissing else vfs.read
530 return set(read(b'requires').splitlines())
530 return set(read(b'requires').splitlines())
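As a hedged illustration of the file format (the entries are examples only), a ``requires`` file is a plain newline-delimited list:

    # .hg/requires
    #
    #   generaldelta
    #   revlogv1
    #   store
    #
    # for which _readrequires(vfs, True) would return
    # {b'generaldelta', b'revlogv1', b'store'}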
531
531
532
532
533 def makelocalrepository(baseui, path: bytes, intents=None):
533 def makelocalrepository(baseui, path: bytes, intents=None):
534 """Create a local repository object.
534 """Create a local repository object.
535
535
536 Given arguments needed to construct a local repository, this function
536 Given arguments needed to construct a local repository, this function
537 performs various early repository loading steps (such as
537 performs various early repository loading steps (such as
538 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
538 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
539 the repository can be opened, derives a type suitable for representing
539 the repository can be opened, derives a type suitable for representing
540 that repository, and returns an instance of it.
540 that repository, and returns an instance of it.
541
541
542 The returned object conforms to the ``repository.completelocalrepository``
542 The returned object conforms to the ``repository.completelocalrepository``
543 interface.
543 interface.
544
544
545 The repository type is derived by calling a series of factory functions
545 The repository type is derived by calling a series of factory functions
546 for each aspect/interface of the final repository. These are defined by
546 for each aspect/interface of the final repository. These are defined by
547 ``REPO_INTERFACES``.
547 ``REPO_INTERFACES``.
548
548
549 Each factory function is called to produce a type implementing a specific
549 Each factory function is called to produce a type implementing a specific
550 interface. The cumulative list of returned types will be combined into a
550 interface. The cumulative list of returned types will be combined into a
551 new type and that type will be instantiated to represent the local
551 new type and that type will be instantiated to represent the local
552 repository.
552 repository.
553
553
554 The factory functions each receive various state that may be consulted
554 The factory functions each receive various state that may be consulted
555 as part of deriving a type.
555 as part of deriving a type.
556
556
557 Extensions should wrap these factory functions to customize repository type
557 Extensions should wrap these factory functions to customize repository type
558 creation. Note that an extension's wrapped function may be called even if
558 creation. Note that an extension's wrapped function may be called even if
559 that extension is not loaded for the repo being constructed. Extensions
559 that extension is not loaded for the repo being constructed. Extensions
560 should check if their ``__name__`` appears in the
560 should check if their ``__name__`` appears in the
561 ``extensionmodulenames`` set passed to the factory function and no-op if
561 ``extensionmodulenames`` set passed to the factory function and no-op if
562 not.
562 not.
563 """
563 """
564 ui = baseui.copy()
564 ui = baseui.copy()
565 # Prevent copying repo configuration.
565 # Prevent copying repo configuration.
566 ui.copy = baseui.copy
566 ui.copy = baseui.copy
567
567
568 # Working directory VFS rooted at repository root.
568 # Working directory VFS rooted at repository root.
569 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
569 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
570
570
571 # Main VFS for .hg/ directory.
571 # Main VFS for .hg/ directory.
572 hgpath = wdirvfs.join(b'.hg')
572 hgpath = wdirvfs.join(b'.hg')
573 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
573 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
574 # Whether this repository is a shared one or not
574 # Whether this repository is a shared one or not
575 shared = False
575 shared = False
576 # If this repository is shared, vfs pointing to shared repo
576 # If this repository is shared, vfs pointing to shared repo
577 sharedvfs = None
577 sharedvfs = None
578
578
579 # The .hg/ path should exist and should be a directory. All other
579 # The .hg/ path should exist and should be a directory. All other
580 # cases are errors.
580 # cases are errors.
581 if not hgvfs.isdir():
581 if not hgvfs.isdir():
582 try:
582 try:
583 hgvfs.stat()
583 hgvfs.stat()
584 except FileNotFoundError:
584 except FileNotFoundError:
585 pass
585 pass
586 except ValueError as e:
586 except ValueError as e:
587 # Can be raised on Python 3.8 when path is invalid.
587 # Can be raised on Python 3.8 when path is invalid.
588 raise error.Abort(
588 raise error.Abort(
589 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
589 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
590 )
590 )
591
591
592 raise error.RepoError(_(b'repository %s not found') % path)
592 raise error.RepoError(_(b'repository %s not found') % path)
593
593
594 requirements = _readrequires(hgvfs, True)
594 requirements = _readrequires(hgvfs, True)
595 shared = (
595 shared = (
596 requirementsmod.SHARED_REQUIREMENT in requirements
596 requirementsmod.SHARED_REQUIREMENT in requirements
597 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
597 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
598 )
598 )
599 storevfs = None
599 storevfs = None
600 if shared:
600 if shared:
601 # This is a shared repo
601 # This is a shared repo
602 sharedvfs = _getsharedvfs(hgvfs, requirements)
602 sharedvfs = _getsharedvfs(hgvfs, requirements)
603 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
603 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
604 else:
604 else:
605 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
605 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
606
606
607 # if .hg/requires contains the sharesafe requirement, it means
607 # if .hg/requires contains the sharesafe requirement, it means
608 # a `.hg/store/requires` file exists too and we should read it
608 # a `.hg/store/requires` file exists too and we should read it
609 # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store requirement
609 # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store requirement
610 # is present. We never write SHARESAFE_REQUIREMENT for a repo if the store
610 # is present. We never write SHARESAFE_REQUIREMENT for a repo if the store
611 # requirement is not present; refer to checkrequirementscompat() for that
611 # requirement is not present; refer to checkrequirementscompat() for that
612 #
612 #
613 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
613 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
614 # repository was shared the old way. We check the share source .hg/requires
614 # repository was shared the old way. We check the share source .hg/requires
615 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
615 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
616 # to be reshared
616 # to be reshared
617 hint = _(b"see `hg help config.format.use-share-safe` for more information")
617 hint = _(b"see `hg help config.format.use-share-safe` for more information")
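    # For illustration (a hedged aside, not from this file): users drive the
    # mismatch handling below from their configuration, e.g.:
    #
    #   [share]
    #   safe-mismatch.source-not-safe = downgrade-allow
    #   safe-mismatch.source-safe = upgrade-allow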
618 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
618 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
619 if (
619 if (
620 shared
620 shared
621 and requirementsmod.SHARESAFE_REQUIREMENT
621 and requirementsmod.SHARESAFE_REQUIREMENT
622 not in _readrequires(sharedvfs, True)
622 not in _readrequires(sharedvfs, True)
623 ):
623 ):
624 mismatch_warn = ui.configbool(
624 mismatch_warn = ui.configbool(
625 b'share', b'safe-mismatch.source-not-safe.warn'
625 b'share', b'safe-mismatch.source-not-safe.warn'
626 )
626 )
627 mismatch_config = ui.config(
627 mismatch_config = ui.config(
628 b'share', b'safe-mismatch.source-not-safe'
628 b'share', b'safe-mismatch.source-not-safe'
629 )
629 )
630 mismatch_verbose_upgrade = ui.configbool(
630 mismatch_verbose_upgrade = ui.configbool(
631 b'share', b'safe-mismatch.source-not-safe:verbose-upgrade'
631 b'share', b'safe-mismatch.source-not-safe:verbose-upgrade'
632 )
632 )
633 if mismatch_config in (
633 if mismatch_config in (
634 b'downgrade-allow',
634 b'downgrade-allow',
635 b'allow',
635 b'allow',
636 b'downgrade-abort',
636 b'downgrade-abort',
637 ):
637 ):
638 # prevent cyclic import localrepo -> upgrade -> localrepo
638 # prevent cyclic import localrepo -> upgrade -> localrepo
639 from . import upgrade
639 from . import upgrade
640
640
641 upgrade.downgrade_share_to_non_safe(
641 upgrade.downgrade_share_to_non_safe(
642 ui,
642 ui,
643 hgvfs,
643 hgvfs,
644 sharedvfs,
644 sharedvfs,
645 requirements,
645 requirements,
646 mismatch_config,
646 mismatch_config,
647 mismatch_warn,
647 mismatch_warn,
648 mismatch_verbose_upgrade,
648 mismatch_verbose_upgrade,
649 )
649 )
650 elif mismatch_config == b'abort':
650 elif mismatch_config == b'abort':
651 raise error.Abort(
651 raise error.Abort(
652 _(b"share source does not support share-safe requirement"),
652 _(b"share source does not support share-safe requirement"),
653 hint=hint,
653 hint=hint,
654 )
654 )
655 else:
655 else:
656 raise error.Abort(
656 raise error.Abort(
657 _(
657 _(
658 b"share-safe mismatch with source.\nUnrecognized"
658 b"share-safe mismatch with source.\nUnrecognized"
659 b" value '%s' of `share.safe-mismatch.source-not-safe`"
659 b" value '%s' of `share.safe-mismatch.source-not-safe`"
660 b" set."
660 b" set."
661 )
661 )
662 % mismatch_config,
662 % mismatch_config,
663 hint=hint,
663 hint=hint,
664 )
664 )
665 else:
665 else:
666 requirements |= _readrequires(storevfs, False)
666 requirements |= _readrequires(storevfs, False)
667 elif shared:
667 elif shared:
668 sourcerequires = _readrequires(sharedvfs, False)
668 sourcerequires = _readrequires(sharedvfs, False)
669 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
669 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
670 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
670 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
671 mismatch_warn = ui.configbool(
671 mismatch_warn = ui.configbool(
672 b'share', b'safe-mismatch.source-safe.warn'
672 b'share', b'safe-mismatch.source-safe.warn'
673 )
673 )
674 mismatch_verbose_upgrade = ui.configbool(
674 mismatch_verbose_upgrade = ui.configbool(
675 b'share', b'safe-mismatch.source-safe:verbose-upgrade'
675 b'share', b'safe-mismatch.source-safe:verbose-upgrade'
676 )
676 )
677 if mismatch_config in (
677 if mismatch_config in (
678 b'upgrade-allow',
678 b'upgrade-allow',
679 b'allow',
679 b'allow',
680 b'upgrade-abort',
680 b'upgrade-abort',
681 ):
681 ):
682 # prevent cyclic import localrepo -> upgrade -> localrepo
682 # prevent cyclic import localrepo -> upgrade -> localrepo
683 from . import upgrade
683 from . import upgrade
684
684
685 upgrade.upgrade_share_to_safe(
685 upgrade.upgrade_share_to_safe(
686 ui,
686 ui,
687 hgvfs,
687 hgvfs,
688 storevfs,
688 storevfs,
689 requirements,
689 requirements,
690 mismatch_config,
690 mismatch_config,
691 mismatch_warn,
691 mismatch_warn,
692 mismatch_verbose_upgrade,
692 mismatch_verbose_upgrade,
693 )
693 )
694 elif mismatch_config == b'abort':
694 elif mismatch_config == b'abort':
695 raise error.Abort(
695 raise error.Abort(
696 _(
696 _(
697 b'version mismatch: source uses share-safe'
697 b'version mismatch: source uses share-safe'
698 b' functionality while the current share does not'
698 b' functionality while the current share does not'
699 ),
699 ),
700 hint=hint,
700 hint=hint,
701 )
701 )
702 else:
702 else:
703 raise error.Abort(
703 raise error.Abort(
704 _(
704 _(
705 b"share-safe mismatch with source.\nUnrecognized"
705 b"share-safe mismatch with source.\nUnrecognized"
706 b" value '%s' of `share.safe-mismatch.source-safe` set."
706 b" value '%s' of `share.safe-mismatch.source-safe` set."
707 )
707 )
708 % mismatch_config,
708 % mismatch_config,
709 hint=hint,
709 hint=hint,
710 )
710 )
711
711
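    # For illustration only: the mismatch knobs consumed above, as they would
    # appear in an hgrc (values shown are examples; see
    # `hg help config.format.use-share-safe`):
    #
    #     [share]
    #     safe-mismatch.source-not-safe = downgrade-abort
    #     safe-mismatch.source-not-safe.warn = yes
    #     safe-mismatch.source-safe = upgrade-allow
    #     safe-mismatch.source-safe.warn = yes
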
    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see it and could encounter a run-time error interacting with that
    # shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. If the `shared` or
    # `relshared` requirement is present, the current repository is a share
    # and the store exists in the path mentioned in `.hg/sharedpath`.
    if shared:
        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
        features.add(repository.REPO_FEATURE_SHARED_STORAGE)
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(
        requirements,
        storebasepath,
        lambda base: vfsmod.vfs(base, cacheaudited=True),
    )
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    if (
        requirementsmod.REVLOGV2_REQUIREMENT in requirements
        or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
    ):
        features.add(repository.REPO_FEATURE_SIDE_DATA)
        # the revlogv2 docket introduced a race condition that we need to fix
        features.discard(repository.REPO_FEATURE_STREAM_CLONE)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The wcache vfs is used to manage cache files related to the working copy.
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(
            ui=ui,
            intents=intents,
            requirements=requirements,
            features=features,
            wdirvfs=wdirvfs,
            hgvfs=hgvfs,
            store=store,
            storevfs=storevfs,
            storeoptions=storevfs.options,
            cachevfs=cachevfs,
            wcachevfs=wcachevfs,
            extensionmodulenames=extensionmodulenames,
            extrastate=extrastate,
            baseclasses=bases,
        )

        if not isinstance(typ, type):
            raise error.ProgrammingError(
                b'unable to construct type for %s' % iface
            )

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(
        b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
    )

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents,
    )


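# For illustration only (not part of the module): the dynamic type derivation
# above amounts to aggregating the factory-produced classes with ``type()``.
# A toy sketch with hypothetical class names:
#
#     class mainbehavior:
#         pass
#
#     class filestoragebehavior:
#         pass
#
#     bases = [mainbehavior, filestoragebehavior]
#     cls = type('derivedrepo:example', tuple(bases), {})
#     assert issubclass(cls, mainbehavior)
#
# The real code additionally embeds the repo path and the sorted requirements
# in the type name, purely as debugging information.

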
def loadhgrc(
    ui,
    wdirvfs: vfsmod.vfs,
    hgvfs: vfsmod.vfs,
    requirements,
    sharedvfs: Optional[vfsmod.vfs] = None,
):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.

    sharedvfs is a vfs object pointing to the source repo if the current one
    is a shared one.
    """
    if not rcutil.use_repo_hgrc():
        return False

    ret = False
    # first load config from the shared source if we have to
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
        try:
            ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
            ret = True
        except IOError:
            pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    return ret


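# For illustration only (not part of the module): an extension monkeypatching
# ``loadhgrc`` as its docstring suggests. A minimal sketch, assuming a
# hypothetical extra config file name:
#
#     from mercurial import extensions, localrepo
#
#     def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
#         ret = orig(ui, wdirvfs, hgvfs, requirements, sharedvfs)
#         try:
#             ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
#             ret = True
#         except IOError:
#             pass
#         return ret
#
#     def uisetup(ui):
#         extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)

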
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to the list of extensions to load automatically
    # when the requirement is present.
    autoextensions = {
        b'git': [b'git'],
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source=b'autoload')


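# For illustration only: with the mapping above, a repository whose
# ``.hg/requires`` lists ``lfs`` is opened as if the user had configured
#
#     [extensions]
#     lfs =
#
# unless some ``extensions.lfs`` value was already set explicitly.

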
def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)

    return supported


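# For illustration only (not part of the module): how an extension plugs into
# the ``featuresetupfuncs`` hook consulted above. A minimal sketch, assuming
# a hypothetical requirement name:
#
#     from mercurial import localrepo
#
#     def featuresetup(ui, supported):
#         # advertise the requirement so repositories using it can be opened
#         supported.add(b'exp-myfeature')
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)
#
# The ``fn.__module__ in modules`` check above ensures the hook only fires
# when the registering extension is actually enabled for this ui.

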
def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RequirementError`` if
    there exists any requirement in that set that currently loaded code
    doesn't recognize.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(missing)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )


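# For illustration only (hypothetical requirement names): a well-formed but
# unknown entry aborts with a hint, while a malformed entry is reported as
# corruption:
#
#     ensurerequirementsrecognized(
#         {b'revlogv1', b'exp-future-feature'}, {b'revlogv1'}
#     )
#     # -> error.RequirementError: repository requires features unknown to
#     #    this Mercurial: exp-future-feature
#
#     ensurerequirementsrecognized({b'\xffbad'}, set())
#     # -> error.RequirementError: .hg/requires file is corrupt

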
def ensurerequirementscompatible(ui, requirements):
    """Validate that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if (
        requirementsmod.SPARSE_REQUIREMENT in requirements
        and not sparse.enabled
    ):
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extension to access'
            )
        )


def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if requirementsmod.STORE_REQUIREMENT in requirements:
        if requirementsmod.FNCACHE_REQUIREMENT in requirements:
            dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
            return storemod.fncachestore(path, vfstype, dotencode)

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)


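# For illustration only: how the requirement combinations above select a
# store implementation:
#
#     b'store' + b'fncache' (+ optional b'dotencode') -> storemod.fncachestore
#     b'store' alone                                  -> storemod.encodedstore
#     no b'store' requirement                         -> storemod.basicstore
#
# Modern repositories carry all three requirements and therefore use
# ``fncachestore`` with dot-encoding enabled.

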
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if (
        requirementsmod.REVLOGV1_REQUIREMENT in requirements
        or requirementsmod.REVLOGV2_REQUIREMENT in requirements
    ):
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:  # explicitly mark repo as using revlogv0
        options[b'revlogv0'] = True

    if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
        options[b'copies-storage'] = b'changeset-sidedata'
    else:
        writecopiesto = ui.config(b'experimental', b'copies.write-to')
        copiesextramode = (b'changeset-only', b'compatibility')
        if writecopiesto in copiesextramode:
            options[b'copies-storage'] = b'extra'

    return options


def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
        options[b'revlogv1'] = True
    if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True
    if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
        options[b'changelogv2'] = True
        cmp_rank = ui.configbool(b'experimental', b'changelog-v2.compute-rank')
        options[b'changelogv2.compute-rank'] = cmp_rank

    if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    options[b'deltabothparents'] = deltabothparents
    dps_cgds = ui.configint(
        b'storage',
        b'revlog.delta-parent-search.candidate-group-chunk-size',
    )
    options[b'delta-parent-search.candidate-group-chunk-size'] = dps_cgds
    options[b'debug-delta'] = ui.configbool(b'debug', b'revlog.debug-delta')

    issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
    options[b'issue6528.fix-incoming'] = issue6528

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # We allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one".
        prefix = r.startswith
        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
            options[b'compengine'] = r.split(b'-', 2)[2]

    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    if ui.configbool(b'experimental', b'rust.index'):
        options[b'rust.index'] = True
    if requirementsmod.NODEMAP_REQUIREMENT in requirements:
        slow_path = ui.config(
            b'storage', b'revlog.persistent-nodemap.slow-path'
        )
        if slow_path not in (b'allow', b'warn', b'abort'):
            default = ui.config_default(
                b'storage', b'revlog.persistent-nodemap.slow-path'
            )
            msg = _(
                b'unknown value for config '
                b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
            )
            ui.warn(msg % slow_path)
            if not ui.quiet:
                ui.warn(_(b'falling back to default value: %s\n') % default)
            slow_path = default

        msg = _(
            b"accessing `persistent-nodemap` repository without associated "
            b"fast implementation."
        )
        hint = _(
            b"check `hg help config.format.use-persistent-nodemap` "
            b"for details"
        )
        if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
            if slow_path == b'warn':
                msg = b"warning: " + msg + b'\n'
                ui.warn(msg)
                if not ui.quiet:
                    hint = b'(' + hint + b')\n'
                    ui.warn(hint)
            if slow_path == b'abort':
                raise error.Abort(msg, hint=hint)
        options[b'persistent-nodemap'] = True
    if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
        slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
        if slow_path not in (b'allow', b'warn', b'abort'):
            default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
            msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
            ui.warn(msg % slow_path)
            if not ui.quiet:
                ui.warn(_(b'falling back to default value: %s\n') % default)
            slow_path = default

        msg = _(
            b"accessing `dirstate-v2` repository without associated "
            b"fast implementation."
        )
        hint = _(
            b"check `hg help config.format.use-dirstate-v2` for details"
        )
        if not dirstate.HAS_FAST_DIRSTATE_V2:
            if slow_path == b'warn':
                msg = b"warning: " + msg + b'\n'
                ui.warn(msg)
                if not ui.quiet:
                    hint = b'(' + hint + b')\n'
                    ui.warn(hint)
            if slow_path == b'abort':
                raise error.Abort(msg, hint=hint)
    if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
        options[b'persistent-nodemap.mmap'] = True
    if ui.configbool(b'devel', b'persistent-nodemap'):
        options[b'devel-force-nodemap'] = True

    return options


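# For illustration only: the compression-engine requirement parsing above
# keeps everything after the second dash as the engine name, e.g.:
#
#     b'revlog-compression-zstd'.split(b'-', 2)[2] == b'zstd'
#     b'exp-compression-zlib'.split(b'-', 2)[2] == b'zlib'

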
def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage:
    """File storage when using revlogs."""

    def file(self, path):
        if path.startswith(b'/'):
            path = path[1:]

        try_split = (
            self.currenttransaction() is not None
            or txnutil.mayhavepending(self.root)
        )

        return filelog.filelog(self.svfs, path, try_split=try_split)


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage:
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path.startswith(b'/'):
            path = path[1:]

        try_split = (
            self.currenttransaction() is not None
            or txnutil.mayhavepending(self.root)
        )
        return filelog.narrowfilelog(
            self.svfs, path, self._storenarrowmatch, try_split=try_split
        )


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage


# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]


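# For illustration only (not part of the module): because the factories above
# are resolved through lambdas at call time, an extension can wrap the
# module-level functions instead of mutating REPO_INTERFACES. A minimal
# sketch, assuming a hypothetical requirement and storage class:
#
#     from mercurial import extensions, localrepo
#
#     def _makefilestorage(orig, requirements, features, **kwargs):
#         if b'exp-myfeature' in requirements:
#             return myfilestorage  # hypothetical storage class
#         return orig(requirements, features, **kwargs)
#
#     def uisetup(ui):
#         extensions.wrapfunction(
#             localrepo, 'makefilestorage', _makefilestorage
#         )

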
@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository:
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    _basesupported = {
        requirementsmod.ARCHIVED_PHASE_REQUIREMENT,
        requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
        requirementsmod.CHANGELOGV2_REQUIREMENT,
        requirementsmod.COPIESSDC_REQUIREMENT,
        requirementsmod.DIRSTATE_TRACKED_HINT_V1,
        requirementsmod.DIRSTATE_V2_REQUIREMENT,
        requirementsmod.DOTENCODE_REQUIREMENT,
        requirementsmod.FNCACHE_REQUIREMENT,
        requirementsmod.GENERALDELTA_REQUIREMENT,
        requirementsmod.INTERNAL_PHASE_REQUIREMENT,
        requirementsmod.NODEMAP_REQUIREMENT,
        requirementsmod.RELATIVE_SHARED_REQUIREMENT,
        requirementsmod.REVLOGV1_REQUIREMENT,
        requirementsmod.REVLOGV2_REQUIREMENT,
        requirementsmod.SHARED_REQUIREMENT,
        requirementsmod.SHARESAFE_REQUIREMENT,
        requirementsmod.SPARSE_REQUIREMENT,
        requirementsmod.SPARSEREVLOG_REQUIREMENT,
        requirementsmod.STORE_REQUIREMENT,
        requirementsmod.TREEMANIFEST_REQUIREMENT,
    }

    # List of prefixes for files which can be written without 'wlock'.
    # Extensions should extend this list when needed.
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }

    def __init__(
        self,
        baseui,
        ui,
        origroot: bytes,
        wdirvfs: vfsmod.vfs,
        hgvfs: vfsmod.vfs,
        requirements,
        supportedrequirements,
        sharedpath: bytes,
        store,
        cachevfs: vfsmod.vfs,
        wcachevfs: vfsmod.vfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
            ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
            ``ui.ui`` instance for use by the repository.

        origroot
            ``bytes`` path to working directory root of this repository.

        wdirvfs
            ``vfs.vfs`` rooted at the working directory.

        hgvfs
            ``vfs.vfs`` rooted at .hg/

        requirements
            ``set`` of bytestrings representing repository opening
            requirements.

        supportedrequirements
            ``set`` of bytestrings representing repository requirements that
            we know how to open. May be a superset of ``requirements``.

        sharedpath
            ``bytes`` defining the path to the storage base directory. Points
            to a ``.hg/`` directory somewhere.

        store
            ``store.basicstore`` (or derived) instance providing access to
            versioned storage.

        cachevfs
            ``vfs.vfs`` used for cache files.

        wcachevfs
            ``vfs.vfs`` used for cache files related to the working copy.

        features
            ``set`` of bytestrings defining features/capabilities of this
            instance.

        intents
            ``set`` of system strings indicating what this repo will be used
            for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.nodeconstants = sha1nodeconstants
        self.nullid = self.nodeconstants.nullid
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are of the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator).
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # Holds sets of revisions to be filtered. Should be cleared when
        # something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        self._dirstate = None
        # post-dirstate-status hooks
        self._postdsstatus = []

        self._pending_narrow_pats = None
        self._pending_narrow_pats_dirstate = None

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'

        self._wanted_sidedata = set()
        self._sidedata_computers = {}
        sidedatamod.set_sidedata_spec_for_repo(self)

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not util.safehasattr(repo, b'_wlockref')
                or not util.safehasattr(repo, b'_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            # path prefixes covered by 'lock'
            vfs_path_prefixes = (
                b'journal.',
                b'undo.',
                b'strip-backup/',
                b'cache/',
            )
            if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, b'_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs

    @property
    def vfs_map(self):
        return {
            b'': self.svfs,
            b'plain': self.vfs,
            b'store': self.svfs,
        }

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        if self.ui.configbool(b'experimental', b'narrow'):
            caps.add(wireprototypes.NARROWCAP)
        return caps

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False

    def peer(self, path=None):
        return localpeer(self, path=path)  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example, calling `repo.filtered("served")` will return a repoview
        using the "served" view, regardless of the initial view used by
        `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

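    # A minimal usage sketch (assuming `repo` is a local repository object):
    # because filtering is never recursive, chaining `filtered()` calls does
    # not stack views; it simply switches to the last view requested.
    #
    #     served = repo.filtered(b'served')
    #     visible = served.filtered(b'visible')  # the "visible" view, not both
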
    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) outside transaction update changelog to content B
        # 3) outside transaction update bookmark file referring to content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are dropped.
        #
        # Having this happen during read is not great, but it becomes worse
        # when it happens during write, because the bookmarks pointing to the
        # "unknown" nodes will be dropped for good. However, writes happen
        # within locks. This locking makes it possible to have a race-free
        # consistent read. For this purpose, data read from disk before
        # locking is "invalidated" right after the locks are taken. These
        # invalidations are "light": the `filecache` mechanism keeps the data
        # in memory and will reuse it if the underlying files did not change.
        # Not parsing the same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by the
        # bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would be
        # refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not
        # enough, because at the time we build the content for `_bookmarks` in
        # (4), the changelog file has already diverged from the content used
        # for loading `changelog` in (1).
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still happen
        # without the lock (with a narrower window), but it would no longer go
        # undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) we force the `changelog` filecache to be tested
        # 4) cachestat for `changelog` are captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as the
        # cache stat computed in (1). As a result, at locking time:
        #  * if the changelog did not change since (1) -> we can reuse the data
        #  * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)

    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if 'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. what we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @changelogcache()
    def changelog(repo):
        # load dirstate before changelog to avoid race see issue6303
        repo.dirstate.prefetch_parents()
        return repo.store.changelog(
            txnutil.mayhavepending(repo.root),
            concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
        )

    @manifestlogcache()
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @unfilteredpropertycache
    def dirstate(self):
        if self._dirstate is None:
            self._dirstate = self._makedirstate()
        else:
            self._dirstate.refresh()
        return self._dirstate

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = None
        if sparse.use_sparse(self):
            sparsematchfn = lambda: sparse.matcher(self)
        v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
        th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
        use_dirstate_v2 = v2_req in self.requirements
        use_tracked_hint = th in self.requirements

        return dirstate.dirstate(
            self.vfs,
            self.ui,
            self.root,
            self._dirstatevalidate,
            sparsematchfn,
            self.nodeconstants,
            use_dirstate_v2,
            use_tracked_hint=use_tracked_hint,
        )

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return self.nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        # the narrow management should probably move into its own object
        val = self._pending_narrow_pats
        if val is None:
            val = narrowspec.load(self)
        return val

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch

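    # A minimal usage sketch (assuming `repo` and a caller-built matcher `m`):
    #
    #     nm = repo.narrowmatch()                       # bare narrow matcher
    #     nm2 = repo.narrowmatch(m, includeexact=True)  # m intersected with
    #                                                   # (narrow + exact files)
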
    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, self.nodeconstants.nullid),
            nullrev: (nullrev, self.nodeconstants.nullid),
            self.nullid: (nullrev, self.nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast path access to the working copy parents
        # however, only do it for filters that ensure the wc is visible.
        quick = self._quick_access_changeid_null.copy()
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == self.nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                #   skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != self.nullid:
            quick[b'.'] = quick[p1node]
        return quick

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains a list of symbols we can recognise right away without
        further processing.
        """
        if self.filtername in repoview.filter_has_wc:
            return self._quick_access_changeid_wc
        return self._quick_access_changeid_null

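    # A minimal sketch of what the fast path above resolves without touching
    # the full lookup machinery (assuming a filter that keeps the working
    # copy visible):
    #
    #     repo[b'null']  # answered from the quick table: nullrev
    #     repo[b'.']     # answered from the quick table: working copy parent
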
    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in range(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == self.nodeconstants.nodelen:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 2 * self.nodeconstants.nodelen:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)

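    # A minimal sketch of the lookup forms handled above (values are
    # illustrative, not from a real repository):
    #
    #     repo[None]         # workingctx
    #     repo[0]            # integer revision number
    #     repo[b'tip']       # symbolic name
    #     repo[binary_node]  # 20-byte binary node (hypothetical variable)
    #     repo[hex_node]     # 40-byte hex node (hypothetical variable)
    #     repo[0:5]          # slice: list of changectx, filtered revs skipped
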
    def __contains__(self, changeid):
        """True if the given changeid exists"""
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr: bytes, *args):
        """Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        """
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)

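    # A minimal usage sketch: the %-formatting escapes arguments safely, so
    # callers should not splice values into the revset string by hand
    # (`somerev` is an illustrative integer revision):
    #
    #     for rev in repo.revs(b'ancestors(%d) and not public()', somerev):
    #         ...
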
    def set(self, expr: bytes, *args):
        """Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        """
        for r in self.revs(expr, *args):
            yield self[r]

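    # A minimal usage sketch (`somerev` is an illustrative integer revision):
    #
    #     for ctx in repo.set(b'%d::', somerev):
    #         ui.write(ctx.hex() + b'\n')
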
    def anyrevs(self, specs: bytes, user=False, localalias=None):
        """Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        """
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

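    # A minimal usage sketch (the alias name and definition are illustrative):
    # expand user revset aliases while overriding one of them locally.
    #
    #     revs = repo.anyrevs(
    #         [b'mine'],
    #         user=True,
    #         localalias={b'mine': b'heads(default)'},
    #     )
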
    def url(self) -> bytes:
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

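    # A minimal usage sketch (hook name and argument are illustrative): this
    # is how extension code would fire a custom hook through the repository:
    #
    #     repo.hook(b'my-extension-hook', throw=False, node=hexnode)
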
    @filteredpropertycache
    def _tagscache(self):
        """Returns a tagscache object that contains various tags related
        caches."""

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache:
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in tags.items():
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        """Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object."""

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for name, (node, hist) in alltags.items():
            if node != self.nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value for (name, value) in tagtypes.items()
        }
        return (tags, tagtypes)

    def tagtype(self, tagname):
        """
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        """

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().items():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.items():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.values():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        """returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number"""
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def register_changeset(self, rev, changelogrevision):
        self.revbranchcache().setdata(rev, changelogrevision)

    def branchtip(self, branch, ignoremissing=False):
        """return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).
        """
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass

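    # A minimal usage sketch: with `ignoremissing=True` an unknown branch
    # yields None instead of raising (branch names are illustrative):
    #
    #     tip = repo.branchtip(b'default')
    #     maybe_tip = repo.branchtip(b'may-not-exist', ignoremissing=True)
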
    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

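    # A minimal usage sketch: `known` is a batch membership test over binary
    # nodes, returning booleans in input order (`node1`/`node2` are
    # illustrative):
    #
    #     flags = repo.known([node1, node2])  # e.g. [True, False]
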
    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f: bytes, *insidef: bytes) -> bytes:
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=None):
        if p2 is None:
            p2 = self.nullid
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path: bytes, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self) -> bytes:
        return self.dirstate.getcwd()

    def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes:
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.items():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

2337 @unfilteredpropertycache
2337 @unfilteredpropertycache
2338 def _encodefilterpats(self):
2338 def _encodefilterpats(self):
2339 return self._loadfilter(b'encode')
2339 return self._loadfilter(b'encode')
2340
2340
2341 @unfilteredpropertycache
2341 @unfilteredpropertycache
2342 def _decodefilterpats(self):
2342 def _decodefilterpats(self):
2343 return self._loadfilter(b'decode')
2343 return self._loadfilter(b'decode')
2344
2344
2345 def adddatafilter(self, name, filter):
2345 def adddatafilter(self, name, filter):
2346 self._datafilters[name] = filter
2346 self._datafilters[name] = filter
2347
2347
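    # An illustrative sketch (not part of upstream documentation): the
    # `_loadfilter`/`_filter` machinery above is driven by the `[encode]` and
    # `[decode]` hgrc sections, so a configuration along these lines would
    # filter file contents on `wread`/`wwrite`:
    #
    #   [encode]
    #   **.txt = dos2unix
    #
    #   [decode]
    #   **.txt = unix2dos
    #
    # Plain shell commands receive the file data on stdin and emit the
    # filtered data on stdout; filters registered via `adddatafilter` are
    # selected by command-name prefix instead.
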
    def wread(self, filename: bytes) -> bytes:
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(
        self,
        filename: bytes,
        data: bytes,
        flags: bytes,
        backgroundclose=False,
        **kwargs
    ) -> int:
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

    def wwritedata(self, filename: bytes, data: bytes) -> bytes:
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

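    # A minimal usage sketch: callers that may or may not already be inside a
    # transaction typically join the running one before opening their own,
    # e.g. (with a hypothetical description string):
    #
    #   tr = repo.currenttransaction()
    #   if tr is None:
    #       tr = repo.transaction(b'some-operation')
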
    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        # At that point your dirstate should be clean:
        #
        # - If you don't have the wlock, why would you still have a dirty
        #   dirstate?
        #
        # - If you hold the wlock, you should not be opening a transaction in
        #   the middle of a `dirstate.changing_*` block. The transaction needs
        #   to be open before that and wrap the change-context.
        #
        # - If you are not within a `dirstate.changing_*` context, why is our
        #   dirstate dirty?
        if self.dirstate._dirty:
            m = "cannot open a transaction with a dirty dirstate"
            raise error.ProgrammingError(m)

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = self.vfs_map
        # we must avoid a cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track these movements from a code perspective. So we fall
        # back to tracking at the repository level. One could envision
        # tracking changes to the '.hgtags' file through changegroup apply,
        # but that fails to cope with cases where a transaction exposes new
        # heads without a changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file, as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
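        # For example, moving an existing tag "v1.0" is recorded as two
        # lines, one for the old value and one for the new one:
        #
        #   -M <old-hex-node> v1.0
        #   +M <new-hex-node> v1.0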
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                assert repo is not None  # help pytype
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here; as we do it only once,
                # building a set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file with the
                        # transaction because we need it to still exist when
                        # the transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to
            # track other families of changes (bookmarks, phases,
            # obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            assert repo is not None  # help pytype

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                if singleheadsub.get(b"public-changes-only", False):
                    filtername = b"immutable"
                else:
                    filtername = b"visible"
                scmutil.enforcesinglehead(
                    repo, tr2, desc, accountclosed, filtername
                )
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args)
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args)
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is
                # being called from transaction.__del__), there's not much we
                # can do, so just leave the unfinished transaction there and
                # let the user run `hg recover`.
                return
            if success:
                # This should be invoked explicitly here, because in-memory
                # changes aren't written out when the transaction closes if
                # tr.addfilegenerator (via dirstate.write or so) wasn't
                # invoked while the transaction was running.
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            lambda: None,
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        for vfs_id, path in self._journalfiles():
            tr.add_journal(vfs_id, path)
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        tr.hookargs[b'changes'] = tr.changes
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As the fncache is used for streaming
        # clones, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if the transaction is successful; schedules a hook run"""
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                assert repo is not None  # help pytype

                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(phases.preparehookargs(node, old, new))
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args)
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            repo = reporef()
            assert repo is not None  # help pytype
            repo._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction
        # summary reports registered via scmutil.registersummarycallback()
        # whose names are 00-txnreport etc. That way, the caches will be warm
        # when the callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if the transaction is aborted"""
            repo = reporef()
            assert repo is not None  # help pytype
            repo.hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if the transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        # This only exists to deal with rollback's need to have viable parents
        # at the end of the operation, so back up viable parents at the time
        # of this operation.
        #
        # We only do it when the `wlock` is taken, otherwise others might be
        # altering the dirstate under us.
        #
        # This is really not a great way to do this (first, because we cannot
        # always do it). More viable alternatives exist:
        #
        # - backing up only the working copy parents in dedicated files and
        #   doing a clean "keep-update" to them on `hg rollback`.
        #
        # - slightly changing the behavior and applying logic similar to "hg
        #   strip" to pick a working copy destination on `hg rollback`
        if self.currentwlock() is not None:
            ds = self.dirstate
            if not self.vfs.exists(b'branch'):
                # force a file to be written if none exists
                ds.setbranch(b'default', None)

            def backup_dirstate(tr):
                for f in ds.all_file_names():
                    # hardlink backup is okay because `dirstate` is always
                    # atomically written and possible data files are
                    # append-only and resistant to trailing data.
                    tr.addbackup(f, hardlink=True, location=b'plain')

            tr.addvalidator(b'dirstate-backup', backup_dirstate)
        return tr

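    # A minimal usage sketch, mirroring the checks above: the store lock must
    # already be held when opening a transaction, and the returned object can
    # be used as a context manager (the description string is hypothetical):
    #
    #   with repo.lock():
    #       with repo.transaction(b'example-op') as tr:
    #           ...  # mutate the store; abort callbacks run if this raises
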
    def _journalfiles(self):
        return (
            (self.svfs, b'journal'),
            (self.vfs, b'journal.desc'),
        )

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))

    def recover(self):
        with self.lock():
            if self.svfs.exists(b"journal"):
                self.ui.status(_(b"rolling back interrupted transaction\n"))
                vfsmap = self.vfs_map
                transaction.rollback(
                    self.svfs,
                    vfsmap,
                    b"journal",
                    self.ui.warn,
                    checkambigfiles=_cachedfiles,
                )
                self.invalidate()
                return True
            else:
                self.ui.warn(_(b"no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists(b"undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_(b"no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    @unfilteredmethod  # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui

        parents = self.dirstate.parents()
        try:
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
            parentgone = any(self[p].rev() > oldtip for p in parents)
        except IOError:
            msg = _(b'rolling back unknown transaction\n')
            desc = None
            parentgone = True

        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        self.destroying()
        vfsmap = self.vfs_map
        skip_journal_pattern = None
        if not parentgone:
            skip_journal_pattern = RE_SKIP_DIRSTATE_ROLLBACK
        transaction.rollback(
            self.svfs,
            vfsmap,
            b'undo',
            ui.warn,
            checkambigfiles=_cachedfiles,
            skip_journal_pattern=skip_journal_pattern,
        )
        self.invalidate()
        self.dirstate.invalidate()

        if parentgone:
            # replace this with some explicit parent update in the future.
            has_node = self.changelog.index.has_node
            if not all(has_node(p) for p in self.dirstate._pl):
                # There was no dirstate to back up initially; we need to drop
                # the existing one.
                with self.dirstate.changing_parents(self):
                    self.dirstate.setparents(self.nullid)
                    self.dirstate.clear()

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
            mergestatemod.mergestate.clean(self)

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from
        # being invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating caches

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid a cyclic reference between repo and transaction.
        reporef = weakref.ref(self)

        def updater(tr):
            repo = reporef()
            assert repo is not None  # help pytype
            repo.updatecaches(tr)

        return updater

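    # A hypothetical extension sketch: because this hook point lives on the
    # repository, an extension can wrap it to warm its own cache alongside
    # the built-in ones, e.g.:
    #
    #   def wrapcacheupdater(orig, self, newtransaction):
    #       updater = orig(self, newtransaction)
    #
    #       def extupdater(tr):
    #           updater(tr)
    #           ...  # warm the extension's own cache here
    #
    #       return extupdater
    #
    #   extensions.wrapfunction(
    #       localrepo.localrepository, '_buildcacheupdater', wrapcacheupdater
    #   )
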
    @unfilteredmethod
    def updatecaches(self, tr=None, full=False, caches=None):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to
        selectively update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data, even the ones usually loaded more lazily.

        The `full` argument can take a special "post-clone" value. In this
        case the cache warming happens after a clone, and some of the slower
        caches may be skipped, namely the `.fnodetags` one. This argument is
        5.8-specific as we plan for a cleaner way to deal with this in 5.9.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid but
            # a later call to `destroyed` will refresh them.
            return

        unfi = self.unfiltered()

        if full:
            msg = (
                "`full` argument for `repo.updatecaches` is deprecated\n"
                "(use `caches=repository.CACHE_ALL` instead)"
            )
            self.ui.deprecwarn(msg, b"5.9")
            if full == b"post-clone":
                caches = repository.CACHES_POST_CLONE
            else:
                caches = repository.CACHES_ALL
        elif caches is None:
            caches = repository.CACHES_DEFAULT

        if repository.CACHE_BRANCHMAP_SERVED in caches:
            if tr is None or tr.changes[b'origrepolen'] < len(self):
                # accessing the 'served' branchmap should refresh all the
                # others.
                self.ui.debug(b'updating the branch cache\n')
                self.filtered(b'served').branchmap()
                self.filtered(b'served.hidden').branchmap()
                # flush all possibly delayed writes.
                self._branchcaches.write_delayed(self)

        if repository.CACHE_CHANGELOG_CACHE in caches:
            self.changelog.update_caches(transaction=tr)

        if repository.CACHE_MANIFESTLOG_CACHE in caches:
            self.manifestlog.update_caches(transaction=tr)

        if repository.CACHE_REV_BRANCH in caches:
            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

        if repository.CACHE_FULL_MANIFEST in caches:
            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

        if repository.CACHE_FILE_NODE_TAGS in caches:
            # accessing the fnode cache warms the cache
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())

        if repository.CACHE_TAGS_DEFAULT in caches:
            # accessing tags warms the cache
            self.tags()
        if repository.CACHE_TAGS_SERVED in caches:
            self.filtered(b'served').tags()

        if repository.CACHE_BRANCHMAP_ALL in caches:
            # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches
            # immediately, so we're forcing a write to cause these caches to
            # be warmed up even if they haven't explicitly been requested yet
            # (if they've never been used by hg, they won't ever have been
            # written, even if they're a subset of another kind of cache that
            # *has* been used).
            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                filtered.branchmap().write(filtered)

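    # A usage sketch: callers select the cache set explicitly, e.g. warming
    # everything after a large local operation:
    #
    #   repo.updatecaches(caches=repository.CACHES_ALL)
    #
    # or rely on the default set when reacting to a closed transaction `tr`:
    #
    #   repo.updatecaches(tr)
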
    def invalidatecaches(self):
        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        self._quick_access_changeid_invalidate()

    def invalidatedirstate(self):
        """Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state)."""
        unfi = self.unfiltered()
        if 'dirstate' in unfi.__dict__:
            assert not self.dirstate.is_changing_any
            del unfi.__dict__['dirstate']

    def invalidate(self, clearfilecache=False):
        """Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        """
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog._delayed
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        """Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes."""
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(
        self,
        vfs,
        lockname,
        wait,
        releasefn,
        acquirefn,
        desc,
    ):
        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint(b"ui", b"timeout")
            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')

        l = lockmod.trylock(
            self.ui,
            vfs,
            lockname,
            timeout,
            warntimeout,
            releasefn=releasefn,
            acquirefn=acquirefn,
            desc=desc,
            signalsafe=signalsafe,
        )
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else:  # no lock has been found.
            callback(True)

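    # A minimal sketch: logic that must only observe the fully-unlocked
    # repository is queued through this method, e.g.:
    #
    #   def on_unlocked(success):
    #       ...  # runs once the outermost lock is released
    #
    #   repo._afterlock(on_unlocked)
    #
    # Note the `success` argument: when no lock is held at registration time,
    # the callback is invoked immediately with `True` (see above).
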
    def lock(self, wait=True):
        """Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a deadlock hazard."""
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        self._lockref = weakref.ref(l)
        return l

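    # An ordering sketch per the docstring above: when both locks are needed,
    # take `wlock` before `lock`, e.g.:
    #
    #   with repo.wlock(), repo.lock():
    #       ...  # safe to touch both the working copy and the store
    #
    # (the `commit` method below follows exactly this pattern).
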
    def wlock(self, wait=True):
        """Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a deadlock hazard."""
        l = self._wlockref() if self._wlockref else None
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such an
        # acquisition would not cause a deadlock, as it would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.is_changing_any:
                msg = b"wlock release in the middle of a changing parents"
                self.ui.develwarn(msg)
                self.dirstate.invalidate()
            else:
                if self.dirstate._dirty:
                    msg = b"dirty dirstate on wlock release"
                    self.ui.develwarn(msg)
                    self.dirstate.write(None)

            unfi = self.unfiltered()
            if 'dirstate' in unfi.__dict__:
                del unfi.__dict__['dirstate']

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
        )
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def currentlock(self):
        """Returns the lock if it's held, or None if it's not."""
        return self._currentlock(self._lockref)

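    # A minimal sketch for the newly added `currentlock`: it mirrors
    # `currentwlock` for the store lock, letting callers assert lock
    # ownership without re-acquiring, e.g.:
    #
    #   if repo.currentlock() is None:
    #       raise error.ProgrammingError(b'store lock required')
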
    def checkcommitpatterns(self, wctx, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == b'.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _(b'file not found!'))
                # Is it a directory that exists or used to exist?
                if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                    d = f + b'/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _(b"no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _(b"file not tracked!"))

    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=None,
        extra=None,
    ):
        """Add a new revision to the current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.InputError(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            ms = mergestatemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            # internal config: ui.allowemptycommit
            if cctx.isempty() and not self.ui.configbool(
                b'ui', b'allowemptycommit'
            ):
                self.ui.debug(b'nothing to commit, clearing merge state\n')
                ms.reset()
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msg_path = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msg_path
                    )
                    self.ui.write(
                        _(
                            b"note: use 'hg commit --logfile "
                            b"%s --edit' to reuse it\n"
                        )
                        % msg_path
                    )
                raise

        def commithook(unused_success):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may already have been stripped before the
            # hook is released
            if self.changelog.hasnode(ret):
                self.hook(
                    b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                )

        self._afterlock(commithook)
        return ret

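    # A minimal programmatic commit sketch (illustrative, not part of the
    # module itself). All arguments are bytes; `match` narrows the committed
    # files. Assumes `repo` is a local repository and `matchmod` is the
    # match module imported above:
    #
    #     m = matchmod.match(repo.root, b'', [b'path:foo.txt'])
    #     node = repo.commit(
    #         text=b'update foo',
    #         user=b'alice <alice@example.com>',
    #         match=m,
    #     )
    #     # `node` is the new changeset id, or None if there was nothing
    #     # to commit
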
    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        return commit.commitctx(self, ctx, error=error, origctx=origctx)

    @unfilteredmethod
    def destroying(self):
        """Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        """
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        """Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        """
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(
        self,
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(
            node2, match, ignored, clean, unknown, listsubrepos
        )

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure to add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        """return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        """
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != self.nullid or p[0] == self.nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != self.nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

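    # Illustrative DAG-query sketch (not part of the module itself),
    # assuming `repo` is a local repository:
    #
    #     tips = repo.heads()                    # all heads, newest first
    #     dheads = repo.branchheads(b'default')  # heads of one named branch
    #     info = repo.branches(tips)             # 4-tuples walking each head
    #                                            # back to its branch point
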
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks instance whose callbacks receive a pushop
        with repo, remote, and outgoing attributes; they are called before
        pushing changesets.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook(unused_success):
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values

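    # Illustrative pushkey sketch (not part of the module itself). The
    # pushkey protocol exposes simple key/value namespaces such as bookmarks
    # and phases; `node` is assumed to be a 40-byte hex changeset id:
    #
    #     marks = repo.listkeys(b'bookmarks')  # {bookmark-name: hex-node}
    #     old = marks.get(b'stable', b'')
    #     ok = repo.pushkey(b'bookmarks', b'stable', old, node)
    #     # truthy on success; prepushkey/pushkey hooks fire around the update
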
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def savecommitmessage(self, text):
        fp = self.vfs(b'last-message.txt', b'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1 :])

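    # Illustrative sketch (not part of the module itself): stash an
    # in-progress message so a later `hg commit --logfile` can reuse it,
    # assuming `repo` is a local repository:
    #
    #     msg_path = repo.savecommitmessage(b'WIP: refactor dirstate')
    #     # `msg_path` is the repo-relative path to .hg/last-message.txt
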
    def register_wanted_sidedata(self, category):
        if repository.REPO_FEATURE_SIDE_DATA not in self.features:
            # Only revlogv2 repos can want sidedata.
            return
        self._wanted_sidedata.add(pycompat.bytestr(category))

    def register_sidedata_computer(
        self, kind, category, keys, computer, flags, replace=False
    ):
        if kind not in revlogconst.ALL_KINDS:
            msg = _(b"unexpected revlog kind '%s'.")
            raise error.ProgrammingError(msg % kind)
        category = pycompat.bytestr(category)
        already_registered = category in self._sidedata_computers.get(kind, [])
        if already_registered and not replace:
            msg = _(
                b"cannot register a sidedata computer twice for category '%s'."
            )
            raise error.ProgrammingError(msg % category)
        if replace and not already_registered:
            msg = _(
                b"cannot replace a sidedata computer that isn't registered "
                b"for category '%s'."
            )
            raise error.ProgrammingError(msg % category)
        self._sidedata_computers.setdefault(kind, {})
        self._sidedata_computers[kind][category] = (keys, computer, flags)


def undoname(fn: bytes) -> bytes:
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))


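# Illustrative examples for undoname() (not part of the module itself);
# it maps a transaction journal file to its post-transaction "undo" twin:
#
#     undoname(b'/repo/.hg/store/journal')     -> b'/repo/.hg/store/undo'
#     undoname(b'/repo/.hg/journal.dirstate')  -> b'/repo/.hg/undo.dirstate'
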
def instance(ui, path: bytes, create, intents=None, createopts=None):
    # prevent cyclic import localrepo -> upgrade -> localrepo
    from . import upgrade

    localpath = urlutil.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    def repo_maker():
        return makelocalrepository(ui, localpath, intents=intents)

    repo = repo_maker()
    repo = upgrade.may_auto_upgrade(repo, repo_maker)
    return repo


def islocal(path: bytes) -> bool:
    return True


def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts


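# Illustrative embedding sketch (not part of the module itself):
# instance() above is the factory used for local repository paths, and
# defaultcreateopts() fills in creation defaults. Assumes a loaded ui:
#
#     from mercurial import localrepo, ui as uimod
#
#     u = uimod.ui.load()
#     opts = localrepo.defaultcreateopts(u)  # picks the storage backend
#     repo = localrepo.instance(u, b'/path/to/repo', create=False)
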
def clone_requirements(ui, createopts, srcrepo):
    """clone the requirements of a local repo for a local clone

    The store requirements are unchanged while the working copy requirements
    depend on the configuration
    """
    target_requirements = set()
    if not srcrepo.requirements:
        # this is a legacy revlog "v0" repository, we cannot do anything fancy
        # with it.
        return target_requirements
    createopts = defaultcreateopts(ui, createopts=createopts)
    for r in newreporequirements(ui, createopts):
        if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
            target_requirements.add(r)

    for r in srcrepo.requirements:
        if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
            target_requirements.add(r)
    return target_requirements


def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
    if ui.configbool(b'format', b'usestore'):
        requirements.add(requirementsmod.STORE_REQUIREMENT)
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)

    compengines = ui.configlist(b'format', b'revlog-compression')
    for compengine in compengines:
        if compengine in util.compengines:
            engine = util.compengines[compengine]
            if engine.available() and engine.revlogheader():
                break
    else:
        raise error.Abort(
            _(
                b'compression engines %s defined by '
                b'format.revlog-compression not available'
            )
            % b', '.join(b'"%s"' % e for e in compengines),
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.use-dirstate-v2
    # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
    if ui.configbool(b'format', b'use-dirstate-v2'):
        requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)

    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
        requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)

    changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
    if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'use-internal-phase'):
        requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)

    # experimental config: format.exp-archived-phase
    if ui.configbool(b'format', b'exp-archived-phase'):
        requirements.add(requirementsmod.ARCHIVED_PHASE_REQUIREMENT)

    if createopts.get(b'narrowfiles'):
        requirements.add(requirementsmod.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)

    if ui.configbool(b'format', b'use-persistent-nodemap'):
        requirements.add(requirementsmod.NODEMAP_REQUIREMENT)

    # if share-safe is enabled, let's create the new repository with the new
    # requirement
    if ui.configbool(b'format', b'use-share-safe'):
        requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)

    # if we are creating a share-repo¹ we have to handle requirements
    # differently.
    #
    # [1] (i.e. reusing the store from another repository, just having a
    # working copy)
    if b'sharedrepo' in createopts:
        source_requirements = set(createopts[b'sharedrepo'].requirements)

        if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
            # share to an old school repository, we have to copy the
            # requirements and hope for the best.
            requirements = source_requirements
        else:
            # We have control over the working copy only, so "copy" the non
            # working copy part over, ignoring previous logic.
            to_drop = set()
            for req in requirements:
                if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
                    continue
                if req in source_requirements:
                    continue
                to_drop.add(req)
            requirements -= to_drop
            requirements |= source_requirements

        if createopts.get(b'sharedrelative'):
            requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
        else:
            requirements.add(requirementsmod.SHARED_REQUIREMENT)

    if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
        version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
        msg = _(b"ignoring unknown tracked key version: %d\n")
        hint = _(
            b"see `hg help config.format.use-dirstate-tracked-hint-version`"
        )
        if version != 1:
            ui.warn(msg % version, hint=hint)
        else:
            requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)

    return requirements


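# Illustrative extension sketch (not part of the module itself): as the
# docstring above notes, extensions customize the requirement set by
# wrapping newreporequirements(). The requirement name below is made up:
#
#     from mercurial import extensions, localrepo
#
#     def _wrapped(orig, ui, createopts):
#         reqs = orig(ui, createopts)
#         reqs.add(b'exp-myfeature')  # hypothetical requirement
#         return reqs
#
#     def uisetup(ui):
#         extensions.wrapfunction(localrepo, 'newreporequirements', _wrapped)
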
def checkrequirementscompat(ui, requirements):
    """Checks compatibility of repository requirements enabled and disabled.

    Returns a set of requirements which need to be dropped because the
    requirements they depend on are not enabled. Also warns users about it.
    """

    dropped = set()

    if requirementsmod.STORE_REQUIREMENT not in requirements:
        if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b'ignoring enabled \'format.bookmarks-in-store\' config '
                    b'because it is incompatible with disabled '
                    b'\'format.usestore\' config\n'
                )
            )
            dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)

        if (
            requirementsmod.SHARED_REQUIREMENT in requirements
            or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
        ):
            raise error.Abort(
                _(
                    b"cannot create shared repository as source was created"
                    b" with 'format.usestore' config disabled"
                )
            )

        if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
            if ui.hasconfig(b'format', b'use-share-safe'):
                msg = _(
                    b"ignoring enabled 'format.use-share-safe' config because "
                    b"it is incompatible with disabled 'format.usestore'"
                    b" config\n"
                )
                ui.warn(msg)
            dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return dropped


def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        b'backend',
        b'lfs',
        b'narrowfiles',
        b'sharedrepo',
        b'sharedrelative',
        b'shareditems',
        b'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}


def createrepository(ui, path: bytes, createopts=None, requirements=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.
    ``requirements`` predefined set of requirements.
        (incompatible with ``createopts``)

    The following keys for ``createopts`` are recognized:

    backend
        The storage backend to use.
    lfs
        Repository will be created with ``lfs`` requirement. The lfs extension
        will automatically be loaded when the repository is accessed.
    narrowfiles
        Set up repository to support narrow file storage.
    sharedrepo
        Repository object from which storage should be shared.
    sharedrelative
        Boolean indicating if the path to the shared repo should be
        stored as relative. By default, the pointer to the "parent" repo
        is stored as an absolute path.
    shareditems
        Set of items to share to the new repository (in addition to storage).
    shallowfilestore
        Indicates that storage for files should be shallow (not all ancestor
        revisions are known).
    """

    if requirements is not None:
        if createopts is not None:
            msg = b'cannot specify both createopts and requirements'
            raise error.ProgrammingError(msg)
        createopts = {}
    else:
        createopts = defaultcreateopts(ui, createopts=createopts)

        unknownopts = filterknowncreateopts(ui, createopts)

        if not isinstance(unknownopts, dict):
            raise error.ProgrammingError(
                b'filterknowncreateopts() did not return a dict'
            )

        if unknownopts:
            raise error.Abort(
                _(
                    b'unable to create repository because of unknown '
                    b'creation option: %s'
                )
                % b', '.join(sorted(unknownopts)),
                hint=_(b'is a required extension not loaded?'),
            )

        requirements = newreporequirements(ui, createopts=createopts)
        requirements -= checkrequirementscompat(ui, requirements)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_(b'repository %s already exists') % path)

    if b'sharedrepo' in createopts:
        sharedpath = createopts[b'sharedrepo'].sharedpath

        if createopts.get(b'sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
                sharedpath = util.pconvert(sharedpath)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
    hgvfs.mkdir(b'wcache')

    has_store = requirementsmod.STORE_REQUIREMENT in requirements
    if has_store and b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 65535, which won't be recognized by
        # such old clients.
        hgvfs.append(
            b'00changelog.i',
            b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
            b'layout',
        )

    # Filter the requirements into working copy and store ones
    wcreq, storereq = scmutil.filterrequirements(requirements)
    # write working copy ones
    scmutil.writerequires(hgvfs, wcreq)
    # If there are store requirements and the current repository
    # is not a shared one, write the store requirements.
    # For a new shared repository, we don't need to write the store
    # requirements as they are already present in the store requires
    if storereq and b'sharedrepo' not in createopts:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
        scmutil.writerequires(storevfs, storereq)

    # Write out file telling readers where to find the shared store.
    if b'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get(b'shareditems'):
        shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)


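# Illustrative sketch (not part of the module itself): programmatic
# repository creation, roughly what `hg init` does. Assumes a loaded ui:
#
#     from mercurial import localrepo, ui as uimod
#
#     u = uimod.ui.load()
#     localrepo.createrepository(u, b'/path/to/newrepo')
#     repo = localrepo.instance(u, b'/path/to/newrepo', create=False)
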
def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # has all attribute lookups result in error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository:
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError(
                b'repo instances should not be used after unshare'
            )

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, '__class__', poisonedrepository)
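
# Illustrative sketch (not part of the module itself): once poisoned, any
# attribute access other than close() raises, exposing stale references:
#
#     poisonrepository(repo)
#     repo.close()  # still allowed, does nothing
#     repo.ui       # raises error.ProgrammingError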