##// END OF EJS Templates
sidedata: add a way of replacing an existing sidedata computer...
Raphaël Gomès -
r47846:81eb7091 default
parent child Browse files
Show More
@@ -1,2028 +1,2030 b''
1 # repository.py - Interfaces and base classes for repositories and peers.
1 # repository.py - Interfaces and base classes for repositories and peers.
2 #
2 #
3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from ..i18n import _
10 from ..i18n import _
11 from .. import error
11 from .. import error
12 from . import util as interfaceutil
12 from . import util as interfaceutil
13
13
14 # Local repository feature string.
14 # Local repository feature string.
15
15
16 # Revlogs are being used for file storage.
16 # Revlogs are being used for file storage.
17 REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
17 REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
18 # The storage part of the repository is shared from an external source.
18 # The storage part of the repository is shared from an external source.
19 REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
19 REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
20 # LFS supported for backing file storage.
20 # LFS supported for backing file storage.
21 REPO_FEATURE_LFS = b'lfs'
21 REPO_FEATURE_LFS = b'lfs'
22 # Repository supports being stream cloned.
22 # Repository supports being stream cloned.
23 REPO_FEATURE_STREAM_CLONE = b'streamclone'
23 REPO_FEATURE_STREAM_CLONE = b'streamclone'
24 # Files storage may lack data for all ancestors.
24 # Files storage may lack data for all ancestors.
25 REPO_FEATURE_SHALLOW_FILE_STORAGE = b'shallowfilestorage'
25 REPO_FEATURE_SHALLOW_FILE_STORAGE = b'shallowfilestorage'
26
26
27 REVISION_FLAG_CENSORED = 1 << 15
27 REVISION_FLAG_CENSORED = 1 << 15
28 REVISION_FLAG_ELLIPSIS = 1 << 14
28 REVISION_FLAG_ELLIPSIS = 1 << 14
29 REVISION_FLAG_EXTSTORED = 1 << 13
29 REVISION_FLAG_EXTSTORED = 1 << 13
30 REVISION_FLAG_HASCOPIESINFO = 1 << 12
30 REVISION_FLAG_HASCOPIESINFO = 1 << 12
31
31
32 REVISION_FLAGS_KNOWN = (
32 REVISION_FLAGS_KNOWN = (
33 REVISION_FLAG_CENSORED
33 REVISION_FLAG_CENSORED
34 | REVISION_FLAG_ELLIPSIS
34 | REVISION_FLAG_ELLIPSIS
35 | REVISION_FLAG_EXTSTORED
35 | REVISION_FLAG_EXTSTORED
36 | REVISION_FLAG_HASCOPIESINFO
36 | REVISION_FLAG_HASCOPIESINFO
37 )
37 )
38
38
39 CG_DELTAMODE_STD = b'default'
39 CG_DELTAMODE_STD = b'default'
40 CG_DELTAMODE_PREV = b'previous'
40 CG_DELTAMODE_PREV = b'previous'
41 CG_DELTAMODE_FULL = b'fulltext'
41 CG_DELTAMODE_FULL = b'fulltext'
42 CG_DELTAMODE_P1 = b'p1'
42 CG_DELTAMODE_P1 = b'p1'
43
43
44
44
45 class ipeerconnection(interfaceutil.Interface):
45 class ipeerconnection(interfaceutil.Interface):
46 """Represents a "connection" to a repository.
46 """Represents a "connection" to a repository.
47
47
48 This is the base interface for representing a connection to a repository.
48 This is the base interface for representing a connection to a repository.
49 It holds basic properties and methods applicable to all peer types.
49 It holds basic properties and methods applicable to all peer types.
50
50
51 This is not a complete interface definition and should not be used
51 This is not a complete interface definition and should not be used
52 outside of this module.
52 outside of this module.
53 """
53 """
54
54
55 ui = interfaceutil.Attribute("""ui.ui instance""")
55 ui = interfaceutil.Attribute("""ui.ui instance""")
56
56
57 def url():
57 def url():
58 """Returns a URL string representing this peer.
58 """Returns a URL string representing this peer.
59
59
60 Currently, implementations expose the raw URL used to construct the
60 Currently, implementations expose the raw URL used to construct the
61 instance. It may contain credentials as part of the URL. The
61 instance. It may contain credentials as part of the URL. The
62 expectations of the value aren't well-defined and this could lead to
62 expectations of the value aren't well-defined and this could lead to
63 data leakage.
63 data leakage.
64
64
65 TODO audit/clean consumers and more clearly define the contents of this
65 TODO audit/clean consumers and more clearly define the contents of this
66 value.
66 value.
67 """
67 """
68
68
69 def local():
69 def local():
70 """Returns a local repository instance.
70 """Returns a local repository instance.
71
71
72 If the peer represents a local repository, returns an object that
72 If the peer represents a local repository, returns an object that
73 can be used to interface with it. Otherwise returns ``None``.
73 can be used to interface with it. Otherwise returns ``None``.
74 """
74 """
75
75
76 def peer():
76 def peer():
77 """Returns an object conforming to this interface.
77 """Returns an object conforming to this interface.
78
78
79 Most implementations will ``return self``.
79 Most implementations will ``return self``.
80 """
80 """
81
81
82 def canpush():
82 def canpush():
83 """Returns a boolean indicating if this peer can be pushed to."""
83 """Returns a boolean indicating if this peer can be pushed to."""
84
84
85 def close():
85 def close():
86 """Close the connection to this peer.
86 """Close the connection to this peer.
87
87
88 This is called when the peer will no longer be used. Resources
88 This is called when the peer will no longer be used. Resources
89 associated with the peer should be cleaned up.
89 associated with the peer should be cleaned up.
90 """
90 """
91
91
92
92
93 class ipeercapabilities(interfaceutil.Interface):
93 class ipeercapabilities(interfaceutil.Interface):
94 """Peer sub-interface related to capabilities."""
94 """Peer sub-interface related to capabilities."""
95
95
96 def capable(name):
96 def capable(name):
97 """Determine support for a named capability.
97 """Determine support for a named capability.
98
98
99 Returns ``False`` if capability not supported.
99 Returns ``False`` if capability not supported.
100
100
101 Returns ``True`` if boolean capability is supported. Returns a string
101 Returns ``True`` if boolean capability is supported. Returns a string
102 if capability support is non-boolean.
102 if capability support is non-boolean.
103
103
104 Capability strings may or may not map to wire protocol capabilities.
104 Capability strings may or may not map to wire protocol capabilities.
105 """
105 """
106
106
107 def requirecap(name, purpose):
107 def requirecap(name, purpose):
108 """Require a capability to be present.
108 """Require a capability to be present.
109
109
110 Raises a ``CapabilityError`` if the capability isn't present.
110 Raises a ``CapabilityError`` if the capability isn't present.
111 """
111 """
112
112
113
113
114 class ipeercommands(interfaceutil.Interface):
114 class ipeercommands(interfaceutil.Interface):
115 """Client-side interface for communicating over the wire protocol.
115 """Client-side interface for communicating over the wire protocol.
116
116
117 This interface is used as a gateway to the Mercurial wire protocol.
117 This interface is used as a gateway to the Mercurial wire protocol.
118 methods commonly call wire protocol commands of the same name.
118 methods commonly call wire protocol commands of the same name.
119 """
119 """
120
120
121 def branchmap():
121 def branchmap():
122 """Obtain heads in named branches.
122 """Obtain heads in named branches.
123
123
124 Returns a dict mapping branch name to an iterable of nodes that are
124 Returns a dict mapping branch name to an iterable of nodes that are
125 heads on that branch.
125 heads on that branch.
126 """
126 """
127
127
128 def capabilities():
128 def capabilities():
129 """Obtain capabilities of the peer.
129 """Obtain capabilities of the peer.
130
130
131 Returns a set of string capabilities.
131 Returns a set of string capabilities.
132 """
132 """
133
133
134 def clonebundles():
134 def clonebundles():
135 """Obtains the clone bundles manifest for the repo.
135 """Obtains the clone bundles manifest for the repo.
136
136
137 Returns the manifest as unparsed bytes.
137 Returns the manifest as unparsed bytes.
138 """
138 """
139
139
140 def debugwireargs(one, two, three=None, four=None, five=None):
140 def debugwireargs(one, two, three=None, four=None, five=None):
141 """Used to facilitate debugging of arguments passed over the wire."""
141 """Used to facilitate debugging of arguments passed over the wire."""
142
142
143 def getbundle(source, **kwargs):
143 def getbundle(source, **kwargs):
144 """Obtain remote repository data as a bundle.
144 """Obtain remote repository data as a bundle.
145
145
146 This command is how the bulk of repository data is transferred from
146 This command is how the bulk of repository data is transferred from
147 the peer to the local repository
147 the peer to the local repository
148
148
149 Returns a generator of bundle data.
149 Returns a generator of bundle data.
150 """
150 """
151
151
152 def heads():
152 def heads():
153 """Determine all known head revisions in the peer.
153 """Determine all known head revisions in the peer.
154
154
155 Returns an iterable of binary nodes.
155 Returns an iterable of binary nodes.
156 """
156 """
157
157
158 def known(nodes):
158 def known(nodes):
159 """Determine whether multiple nodes are known.
159 """Determine whether multiple nodes are known.
160
160
161 Accepts an iterable of nodes whose presence to check for.
161 Accepts an iterable of nodes whose presence to check for.
162
162
163 Returns an iterable of booleans indicating of the corresponding node
163 Returns an iterable of booleans indicating of the corresponding node
164 at that index is known to the peer.
164 at that index is known to the peer.
165 """
165 """
166
166
167 def listkeys(namespace):
167 def listkeys(namespace):
168 """Obtain all keys in a pushkey namespace.
168 """Obtain all keys in a pushkey namespace.
169
169
170 Returns an iterable of key names.
170 Returns an iterable of key names.
171 """
171 """
172
172
173 def lookup(key):
173 def lookup(key):
174 """Resolve a value to a known revision.
174 """Resolve a value to a known revision.
175
175
176 Returns a binary node of the resolved revision on success.
176 Returns a binary node of the resolved revision on success.
177 """
177 """
178
178
179 def pushkey(namespace, key, old, new):
179 def pushkey(namespace, key, old, new):
180 """Set a value using the ``pushkey`` protocol.
180 """Set a value using the ``pushkey`` protocol.
181
181
182 Arguments correspond to the pushkey namespace and key to operate on and
182 Arguments correspond to the pushkey namespace and key to operate on and
183 the old and new values for that key.
183 the old and new values for that key.
184
184
185 Returns a string with the peer result. The value inside varies by the
185 Returns a string with the peer result. The value inside varies by the
186 namespace.
186 namespace.
187 """
187 """
188
188
189 def stream_out():
189 def stream_out():
190 """Obtain streaming clone data.
190 """Obtain streaming clone data.
191
191
192 Successful result should be a generator of data chunks.
192 Successful result should be a generator of data chunks.
193 """
193 """
194
194
195 def unbundle(bundle, heads, url):
195 def unbundle(bundle, heads, url):
196 """Transfer repository data to the peer.
196 """Transfer repository data to the peer.
197
197
198 This is how the bulk of data during a push is transferred.
198 This is how the bulk of data during a push is transferred.
199
199
200 Returns the integer number of heads added to the peer.
200 Returns the integer number of heads added to the peer.
201 """
201 """
202
202
203
203
204 class ipeerlegacycommands(interfaceutil.Interface):
204 class ipeerlegacycommands(interfaceutil.Interface):
205 """Interface for implementing support for legacy wire protocol commands.
205 """Interface for implementing support for legacy wire protocol commands.
206
206
207 Wire protocol commands transition to legacy status when they are no longer
207 Wire protocol commands transition to legacy status when they are no longer
208 used by modern clients. To facilitate identifying which commands are
208 used by modern clients. To facilitate identifying which commands are
209 legacy, the interfaces are split.
209 legacy, the interfaces are split.
210 """
210 """
211
211
212 def between(pairs):
212 def between(pairs):
213 """Obtain nodes between pairs of nodes.
213 """Obtain nodes between pairs of nodes.
214
214
215 ``pairs`` is an iterable of node pairs.
215 ``pairs`` is an iterable of node pairs.
216
216
217 Returns an iterable of iterables of nodes corresponding to each
217 Returns an iterable of iterables of nodes corresponding to each
218 requested pair.
218 requested pair.
219 """
219 """
220
220
221 def branches(nodes):
221 def branches(nodes):
222 """Obtain ancestor changesets of specific nodes back to a branch point.
222 """Obtain ancestor changesets of specific nodes back to a branch point.
223
223
224 For each requested node, the peer finds the first ancestor node that is
224 For each requested node, the peer finds the first ancestor node that is
225 a DAG root or is a merge.
225 a DAG root or is a merge.
226
226
227 Returns an iterable of iterables with the resolved values for each node.
227 Returns an iterable of iterables with the resolved values for each node.
228 """
228 """
229
229
230 def changegroup(nodes, source):
230 def changegroup(nodes, source):
231 """Obtain a changegroup with data for descendants of specified nodes."""
231 """Obtain a changegroup with data for descendants of specified nodes."""
232
232
233 def changegroupsubset(bases, heads, source):
233 def changegroupsubset(bases, heads, source):
234 pass
234 pass
235
235
236
236
237 class ipeercommandexecutor(interfaceutil.Interface):
237 class ipeercommandexecutor(interfaceutil.Interface):
238 """Represents a mechanism to execute remote commands.
238 """Represents a mechanism to execute remote commands.
239
239
240 This is the primary interface for requesting that wire protocol commands
240 This is the primary interface for requesting that wire protocol commands
241 be executed. Instances of this interface are active in a context manager
241 be executed. Instances of this interface are active in a context manager
242 and have a well-defined lifetime. When the context manager exits, all
242 and have a well-defined lifetime. When the context manager exits, all
243 outstanding requests are waited on.
243 outstanding requests are waited on.
244 """
244 """
245
245
246 def callcommand(name, args):
246 def callcommand(name, args):
247 """Request that a named command be executed.
247 """Request that a named command be executed.
248
248
249 Receives the command name and a dictionary of command arguments.
249 Receives the command name and a dictionary of command arguments.
250
250
251 Returns a ``concurrent.futures.Future`` that will resolve to the
251 Returns a ``concurrent.futures.Future`` that will resolve to the
252 result of that command request. That exact value is left up to
252 result of that command request. That exact value is left up to
253 the implementation and possibly varies by command.
253 the implementation and possibly varies by command.
254
254
255 Not all commands can coexist with other commands in an executor
255 Not all commands can coexist with other commands in an executor
256 instance: it depends on the underlying wire protocol transport being
256 instance: it depends on the underlying wire protocol transport being
257 used and the command itself.
257 used and the command itself.
258
258
259 Implementations MAY call ``sendcommands()`` automatically if the
259 Implementations MAY call ``sendcommands()`` automatically if the
260 requested command can not coexist with other commands in this executor.
260 requested command can not coexist with other commands in this executor.
261
261
262 Implementations MAY call ``sendcommands()`` automatically when the
262 Implementations MAY call ``sendcommands()`` automatically when the
263 future's ``result()`` is called. So, consumers using multiple
263 future's ``result()`` is called. So, consumers using multiple
264 commands with an executor MUST ensure that ``result()`` is not called
264 commands with an executor MUST ensure that ``result()`` is not called
265 until all command requests have been issued.
265 until all command requests have been issued.
266 """
266 """
267
267
268 def sendcommands():
268 def sendcommands():
269 """Trigger submission of queued command requests.
269 """Trigger submission of queued command requests.
270
270
271 Not all transports submit commands as soon as they are requested to
271 Not all transports submit commands as soon as they are requested to
272 run. When called, this method forces queued command requests to be
272 run. When called, this method forces queued command requests to be
273 issued. It will no-op if all commands have already been sent.
273 issued. It will no-op if all commands have already been sent.
274
274
275 When called, no more new commands may be issued with this executor.
275 When called, no more new commands may be issued with this executor.
276 """
276 """
277
277
278 def close():
278 def close():
279 """Signal that this command request is finished.
279 """Signal that this command request is finished.
280
280
281 When called, no more new commands may be issued. All outstanding
281 When called, no more new commands may be issued. All outstanding
282 commands that have previously been issued are waited on before
282 commands that have previously been issued are waited on before
283 returning. This not only includes waiting for the futures to resolve,
283 returning. This not only includes waiting for the futures to resolve,
284 but also waiting for all response data to arrive. In other words,
284 but also waiting for all response data to arrive. In other words,
285 calling this waits for all on-wire state for issued command requests
285 calling this waits for all on-wire state for issued command requests
286 to finish.
286 to finish.
287
287
288 When used as a context manager, this method is called when exiting the
288 When used as a context manager, this method is called when exiting the
289 context manager.
289 context manager.
290
290
291 This method may call ``sendcommands()`` if there are buffered commands.
291 This method may call ``sendcommands()`` if there are buffered commands.
292 """
292 """
293
293
294
294
295 class ipeerrequests(interfaceutil.Interface):
295 class ipeerrequests(interfaceutil.Interface):
296 """Interface for executing commands on a peer."""
296 """Interface for executing commands on a peer."""
297
297
298 limitedarguments = interfaceutil.Attribute(
298 limitedarguments = interfaceutil.Attribute(
299 """True if the peer cannot receive large argument value for commands."""
299 """True if the peer cannot receive large argument value for commands."""
300 )
300 )
301
301
302 def commandexecutor():
302 def commandexecutor():
303 """A context manager that resolves to an ipeercommandexecutor.
303 """A context manager that resolves to an ipeercommandexecutor.
304
304
305 The object this resolves to can be used to issue command requests
305 The object this resolves to can be used to issue command requests
306 to the peer.
306 to the peer.
307
307
308 Callers should call its ``callcommand`` method to issue command
308 Callers should call its ``callcommand`` method to issue command
309 requests.
309 requests.
310
310
311 A new executor should be obtained for each distinct set of commands
311 A new executor should be obtained for each distinct set of commands
312 (possibly just a single command) that the consumer wants to execute
312 (possibly just a single command) that the consumer wants to execute
313 as part of a single operation or round trip. This is because some
313 as part of a single operation or round trip. This is because some
314 peers are half-duplex and/or don't support persistent connections.
314 peers are half-duplex and/or don't support persistent connections.
315 e.g. in the case of HTTP peers, commands sent to an executor represent
315 e.g. in the case of HTTP peers, commands sent to an executor represent
316 a single HTTP request. While some peers may support multiple command
316 a single HTTP request. While some peers may support multiple command
317 sends over the wire per executor, consumers need to code to the least
317 sends over the wire per executor, consumers need to code to the least
318 capable peer. So it should be assumed that command executors buffer
318 capable peer. So it should be assumed that command executors buffer
319 called commands until they are told to send them and that each
319 called commands until they are told to send them and that each
320 command executor could result in a new connection or wire-level request
320 command executor could result in a new connection or wire-level request
321 being issued.
321 being issued.
322 """
322 """
323
323
324
324
325 class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
325 class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
326 """Unified interface for peer repositories.
326 """Unified interface for peer repositories.
327
327
328 All peer instances must conform to this interface.
328 All peer instances must conform to this interface.
329 """
329 """
330
330
331
331
332 class ipeerv2(ipeerconnection, ipeercapabilities, ipeerrequests):
332 class ipeerv2(ipeerconnection, ipeercapabilities, ipeerrequests):
333 """Unified peer interface for wire protocol version 2 peers."""
333 """Unified peer interface for wire protocol version 2 peers."""
334
334
335 apidescriptor = interfaceutil.Attribute(
335 apidescriptor = interfaceutil.Attribute(
336 """Data structure holding description of server API."""
336 """Data structure holding description of server API."""
337 )
337 )
338
338
339
339
340 @interfaceutil.implementer(ipeerbase)
340 @interfaceutil.implementer(ipeerbase)
341 class peer(object):
341 class peer(object):
342 """Base class for peer repositories."""
342 """Base class for peer repositories."""
343
343
344 limitedarguments = False
344 limitedarguments = False
345
345
346 def capable(self, name):
346 def capable(self, name):
347 caps = self.capabilities()
347 caps = self.capabilities()
348 if name in caps:
348 if name in caps:
349 return True
349 return True
350
350
351 name = b'%s=' % name
351 name = b'%s=' % name
352 for cap in caps:
352 for cap in caps:
353 if cap.startswith(name):
353 if cap.startswith(name):
354 return cap[len(name) :]
354 return cap[len(name) :]
355
355
356 return False
356 return False
357
357
358 def requirecap(self, name, purpose):
358 def requirecap(self, name, purpose):
359 if self.capable(name):
359 if self.capable(name):
360 return
360 return
361
361
362 raise error.CapabilityError(
362 raise error.CapabilityError(
363 _(
363 _(
364 b'cannot %s; remote repository does not support the '
364 b'cannot %s; remote repository does not support the '
365 b'\'%s\' capability'
365 b'\'%s\' capability'
366 )
366 )
367 % (purpose, name)
367 % (purpose, name)
368 )
368 )
369
369
370
370
371 class iverifyproblem(interfaceutil.Interface):
371 class iverifyproblem(interfaceutil.Interface):
372 """Represents a problem with the integrity of the repository.
372 """Represents a problem with the integrity of the repository.
373
373
374 Instances of this interface are emitted to describe an integrity issue
374 Instances of this interface are emitted to describe an integrity issue
375 with a repository (e.g. corrupt storage, missing data, etc).
375 with a repository (e.g. corrupt storage, missing data, etc).
376
376
377 Instances are essentially messages associated with severity.
377 Instances are essentially messages associated with severity.
378 """
378 """
379
379
380 warning = interfaceutil.Attribute(
380 warning = interfaceutil.Attribute(
381 """Message indicating a non-fatal problem."""
381 """Message indicating a non-fatal problem."""
382 )
382 )
383
383
384 error = interfaceutil.Attribute("""Message indicating a fatal problem.""")
384 error = interfaceutil.Attribute("""Message indicating a fatal problem.""")
385
385
386 node = interfaceutil.Attribute(
386 node = interfaceutil.Attribute(
387 """Revision encountering the problem.
387 """Revision encountering the problem.
388
388
389 ``None`` means the problem doesn't apply to a single revision.
389 ``None`` means the problem doesn't apply to a single revision.
390 """
390 """
391 )
391 )
392
392
393
393
394 class irevisiondelta(interfaceutil.Interface):
394 class irevisiondelta(interfaceutil.Interface):
395 """Represents a delta between one revision and another.
395 """Represents a delta between one revision and another.
396
396
397 Instances convey enough information to allow a revision to be exchanged
397 Instances convey enough information to allow a revision to be exchanged
398 with another repository.
398 with another repository.
399
399
400 Instances represent the fulltext revision data or a delta against
400 Instances represent the fulltext revision data or a delta against
401 another revision. Therefore the ``revision`` and ``delta`` attributes
401 another revision. Therefore the ``revision`` and ``delta`` attributes
402 are mutually exclusive.
402 are mutually exclusive.
403
403
404 Typically used for changegroup generation.
404 Typically used for changegroup generation.
405 """
405 """
406
406
407 node = interfaceutil.Attribute("""20 byte node of this revision.""")
407 node = interfaceutil.Attribute("""20 byte node of this revision.""")
408
408
409 p1node = interfaceutil.Attribute(
409 p1node = interfaceutil.Attribute(
410 """20 byte node of 1st parent of this revision."""
410 """20 byte node of 1st parent of this revision."""
411 )
411 )
412
412
413 p2node = interfaceutil.Attribute(
413 p2node = interfaceutil.Attribute(
414 """20 byte node of 2nd parent of this revision."""
414 """20 byte node of 2nd parent of this revision."""
415 )
415 )
416
416
417 linknode = interfaceutil.Attribute(
417 linknode = interfaceutil.Attribute(
418 """20 byte node of the changelog revision this node is linked to."""
418 """20 byte node of the changelog revision this node is linked to."""
419 )
419 )
420
420
421 flags = interfaceutil.Attribute(
421 flags = interfaceutil.Attribute(
422 """2 bytes of integer flags that apply to this revision.
422 """2 bytes of integer flags that apply to this revision.
423
423
424 This is a bitwise composition of the ``REVISION_FLAG_*`` constants.
424 This is a bitwise composition of the ``REVISION_FLAG_*`` constants.
425 """
425 """
426 )
426 )
427
427
428 basenode = interfaceutil.Attribute(
428 basenode = interfaceutil.Attribute(
429 """20 byte node of the revision this data is a delta against.
429 """20 byte node of the revision this data is a delta against.
430
430
431 ``nullid`` indicates that the revision is a full revision and not
431 ``nullid`` indicates that the revision is a full revision and not
432 a delta.
432 a delta.
433 """
433 """
434 )
434 )
435
435
436 baserevisionsize = interfaceutil.Attribute(
436 baserevisionsize = interfaceutil.Attribute(
437 """Size of base revision this delta is against.
437 """Size of base revision this delta is against.
438
438
439 May be ``None`` if ``basenode`` is ``nullid``.
439 May be ``None`` if ``basenode`` is ``nullid``.
440 """
440 """
441 )
441 )
442
442
443 revision = interfaceutil.Attribute(
443 revision = interfaceutil.Attribute(
444 """Raw fulltext of revision data for this node."""
444 """Raw fulltext of revision data for this node."""
445 )
445 )
446
446
447 delta = interfaceutil.Attribute(
447 delta = interfaceutil.Attribute(
448 """Delta between ``basenode`` and ``node``.
448 """Delta between ``basenode`` and ``node``.
449
449
450 Stored in the bdiff delta format.
450 Stored in the bdiff delta format.
451 """
451 """
452 )
452 )
453
453
454 sidedata = interfaceutil.Attribute(
454 sidedata = interfaceutil.Attribute(
455 """Raw sidedata bytes for the given revision."""
455 """Raw sidedata bytes for the given revision."""
456 )
456 )
457
457
458 protocol_flags = interfaceutil.Attribute(
458 protocol_flags = interfaceutil.Attribute(
459 """Single byte of integer flags that can influence the protocol.
459 """Single byte of integer flags that can influence the protocol.
460
460
461 This is a bitwise composition of the ``storageutil.CG_FLAG*`` constants.
461 This is a bitwise composition of the ``storageutil.CG_FLAG*`` constants.
462 """
462 """
463 )
463 )
464
464
465
465
466 class ifilerevisionssequence(interfaceutil.Interface):
466 class ifilerevisionssequence(interfaceutil.Interface):
467 """Contains index data for all revisions of a file.
467 """Contains index data for all revisions of a file.
468
468
469 Types implementing this behave like lists of tuples. The index
469 Types implementing this behave like lists of tuples. The index
470 in the list corresponds to the revision number. The values contain
470 in the list corresponds to the revision number. The values contain
471 index metadata.
471 index metadata.
472
472
473 The *null* revision (revision number -1) is always the last item
473 The *null* revision (revision number -1) is always the last item
474 in the index.
474 in the index.
475 """
475 """
476
476
477 def __len__():
477 def __len__():
478 """The total number of revisions."""
478 """The total number of revisions."""
479
479
480 def __getitem__(rev):
480 def __getitem__(rev):
481 """Returns the object having a specific revision number.
481 """Returns the object having a specific revision number.
482
482
483 Returns an 8-tuple with the following fields:
483 Returns an 8-tuple with the following fields:
484
484
485 offset+flags
485 offset+flags
486 Contains the offset and flags for the revision. 64-bit unsigned
486 Contains the offset and flags for the revision. 64-bit unsigned
487 integer where first 6 bytes are the offset and the next 2 bytes
487 integer where first 6 bytes are the offset and the next 2 bytes
488 are flags. The offset can be 0 if it is not used by the store.
488 are flags. The offset can be 0 if it is not used by the store.
489 compressed size
489 compressed size
490 Size of the revision data in the store. It can be 0 if it isn't
490 Size of the revision data in the store. It can be 0 if it isn't
491 needed by the store.
491 needed by the store.
492 uncompressed size
492 uncompressed size
493 Fulltext size. It can be 0 if it isn't needed by the store.
493 Fulltext size. It can be 0 if it isn't needed by the store.
494 base revision
494 base revision
495 Revision number of revision the delta for storage is encoded
495 Revision number of revision the delta for storage is encoded
496 against. -1 indicates not encoded against a base revision.
496 against. -1 indicates not encoded against a base revision.
497 link revision
497 link revision
498 Revision number of changelog revision this entry is related to.
498 Revision number of changelog revision this entry is related to.
499 p1 revision
499 p1 revision
500 Revision number of 1st parent. -1 if no 1st parent.
500 Revision number of 1st parent. -1 if no 1st parent.
501 p2 revision
501 p2 revision
502 Revision number of 2nd parent. -1 if no 1st parent.
502 Revision number of 2nd parent. -1 if no 1st parent.
503 node
503 node
504 Binary node value for this revision number.
504 Binary node value for this revision number.
505
505
506 Negative values should index off the end of the sequence. ``-1``
506 Negative values should index off the end of the sequence. ``-1``
507 should return the null revision. ``-2`` should return the most
507 should return the null revision. ``-2`` should return the most
508 recent revision.
508 recent revision.
509 """
509 """
510
510
511 def __contains__(rev):
511 def __contains__(rev):
512 """Whether a revision number exists."""
512 """Whether a revision number exists."""
513
513
514 def insert(self, i, entry):
514 def insert(self, i, entry):
515 """Add an item to the index at specific revision."""
515 """Add an item to the index at specific revision."""
516
516
517
517
class ifileindex(interfaceutil.Interface):
    """Storage interface for index data of a single file.

    File storage data is divided into index metadata and data storage.
    This interface defines the index portion of the interface.

    The index logically consists of:

    * A mapping between revision numbers and nodes.
    * DAG data (storing and querying the relationship between nodes).
    * Metadata to facilitate storage.
    """

    nullid = interfaceutil.Attribute(
        """node for the null revision for use as delta base."""
    )

    def __len__():
        """Obtain the number of revisions stored for this file."""

    def __iter__():
        """Iterate over revision numbers for this file."""

    def hasnode(node):
        """Returns a bool indicating if a node is known to this store.

        Implementations must only return True for full, binary node values:
        hex nodes, revision numbers, and partial node matches must be
        rejected.

        The null node is never present.
        """

    def revs(start=0, stop=None):
        """Iterate over revision numbers for this file, with control."""

    def parents(node):
        """Returns a 2-tuple of parent nodes for a revision.

        Values will be ``nullid`` if the parent is empty.
        """

    def parentrevs(rev):
        """Like parents() but operates on revision numbers."""

    def rev(node):
        """Obtain the revision number given a node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def node(rev):
        """Obtain the node value given a revision number.

        Raises ``IndexError`` if the node is not known.
        """

    def lookup(node):
        """Attempt to resolve a value to a node.

        Value can be a binary node, hex node, revision number, or a string
        that can be converted to an integer.

        Raises ``error.LookupError`` if a node could not be resolved.
        """

    def linkrev(rev):
        """Obtain the changeset revision number a revision is linked to."""

    def iscensored(rev):
        """Return whether a revision's content has been censored."""

    def commonancestorsheads(node1, node2):
        """Obtain an iterable of nodes containing heads of common ancestors.

        See ``ancestor.commonancestorsheads()``.
        """

    def descendants(revs):
        """Obtain descendant revision numbers for a set of revision numbers.

        If ``nullrev`` is in the set, this is equivalent to ``revs()``.
        """

    def heads(start=None, stop=None):
        """Obtain a list of nodes that are DAG heads, with control.

        The set of revisions examined can be limited by specifying
        ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
        iterable of nodes. DAG traversal starts at earlier revision
        ``start`` and iterates forward until any node in ``stop`` is
        encountered.
        """

    def children(node):
        """Obtain nodes that are children of a node.

        Returns a list of nodes.
        """
class ifiledata(interfaceutil.Interface):
    """Storage interface for data storage of a specific file.

    This complements ``ifileindex`` and provides an interface for accessing
    data for a tracked file.
    """

    def size(rev):
        """Obtain the fulltext size of file data.

        Any metadata is excluded from size measurements.
        """

    def revision(node, raw=False):
        """Obtain fulltext data for a node.

        By default, any storage transformations are applied before the data
        is returned. If ``raw`` is True, non-raw storage transformations
        are not applied.

        The fulltext data may contain a header containing metadata. Most
        consumers should use ``read()`` to obtain the actual file data.
        """

    def rawdata(node):
        """Obtain raw data for a node."""

    def read(node):
        """Resolve file fulltext data.

        This is similar to ``revision()`` except any metadata in the data
        headers is stripped.
        """

    def renamed(node):
        """Obtain copy metadata for a node.

        Returns ``False`` if no copy metadata is stored or a 2-tuple of
        (path, node) from which this revision was copied.
        """

    def cmp(node, fulltext):
        """Compare fulltext to another revision.

        Returns True if the fulltext is different from what is stored.

        This takes copy metadata into account.

        TODO better document the copy metadata and censoring logic.
        """

    def emitrevisions(
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
        deltamode=CG_DELTAMODE_STD,
    ):
        """Produce ``irevisiondelta`` for revisions.

        Given an iterable of nodes, emits objects conforming to the
        ``irevisiondelta`` interface that describe revisions in storage.

        This method is a generator.

        The input nodes may be unordered. Implementations must ensure that a
        node's parents are emitted before the node itself. Transitively, this
        means that a node may only be emitted once all its ancestors in
        ``nodes`` have also been emitted.

        By default, emits "index" data (the ``node``, ``p1node``, and
        ``p2node`` attributes). If ``revisiondata`` is set, revision data
        will also be present on the emitted objects.

        With default argument values, implementations can choose to emit
        either fulltext revision data or a delta. When emitting deltas,
        implementations must consider whether the delta's base revision
        fulltext is available to the receiver.

        The base revision fulltext is guaranteed to be available if any of
        the following are met:

        * Its fulltext revision was emitted by this method call.
        * A delta for that revision was emitted by this method call.
        * ``assumehaveparentrevisions`` is True and the base revision is a
          parent of the node.

        ``nodesorder`` can be used to control the order that revisions are
        emitted. By default, revisions can be reordered as long as they are
        in DAG topological order (see above). If the value is ``nodes``,
        the iteration order from ``nodes`` should be used. If the value is
        ``storage``, then the native order from the backing storage layer
        is used. (Not all storage layers will have strong ordering and behavior
        of this mode is storage-dependent.) ``nodes`` ordering can force
        revisions to be emitted before their ancestors, so consumers should
        use it with care.

        The ``linknode`` attribute on the returned ``irevisiondelta`` may not
        be set and it is the caller's responsibility to resolve it, if needed.

        If ``deltamode`` is CG_DELTAMODE_PREV and revision data is requested,
        all revision data should be emitted as deltas against the revision
        emitted just prior. The initial revision should be a delta against its
        1st parent.
        """
class ifilemutation(interfaceutil.Interface):
    """Storage interface for mutation events of a tracked file."""

    def add(filedata, meta, transaction, linkrev, p1, p2):
        """Add a new revision to the store.

        Takes file data, dictionary of metadata, a transaction, linkrev,
        and parent nodes.

        Returns the node that was added.

        May no-op if a revision matching the supplied data is already stored.
        """

    def addrevision(
        revisiondata,
        transaction,
        linkrev,
        p1,
        p2,
        node=None,
        flags=0,
        cachedelta=None,
    ):
        """Add a new revision to the store and return its number.

        This is similar to ``add()`` except it operates at a lower level.

        The data passed in already contains a metadata header, if any.

        ``node`` and ``flags`` can be used to define the expected node and
        the flags to use with storage. ``flags`` is a bitwise value composed
        of the various ``REVISION_FLAG_*`` constants.

        ``add()`` is usually called when adding files from e.g. the working
        directory. ``addrevision()`` is often called by ``add()`` and for
        scenarios where revision data has already been computed, such as when
        applying raw data from a peer repo.
        """

    def addgroup(
        deltas,
        linkmapper,
        transaction,
        addrevisioncb=None,
        duplicaterevisioncb=None,
        maybemissingparents=False,
    ):
        """Process a series of deltas for storage.

        ``deltas`` is an iterable of 7-tuples of
        (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
        to add.

        The ``delta`` field contains ``mpatch`` data to apply to a base
        revision, identified by ``deltabase``. The base node can be
        ``nullid``, in which case the header from the delta can be ignored
        and the delta used as the fulltext.

        ``alwayscache`` instructs the lower layers to cache the content of the
        newly added revision, even if it needs to be explicitly computed.
        This used to be the default when ``addrevisioncb`` was provided up to
        Mercurial 5.8.

        ``addrevisioncb`` should be called for each new rev as it is committed.
        ``duplicaterevisioncb`` should be called for all revs with a
        pre-existing node.

        ``maybemissingparents`` is a bool indicating whether the incoming
        data may reference parents/ancestor revisions that aren't present.
        This flag is set when receiving data into a "shallow" store that
        doesn't hold all history.

        Returns a list of nodes that were processed. A node will be in the list
        even if it existed in the store previously.
        """

    def censorrevision(tr, node, tombstone=b''):
        """Remove the content of a single revision.

        The specified ``node`` will have its content purged from storage.
        Future attempts to access the revision data for this node will
        result in failure.

        A ``tombstone`` message can optionally be stored. This message may be
        displayed to users when they attempt to access the missing revision
        data.

        Storage backends may have stored deltas against the previous content
        in this revision. As part of censoring a revision, these storage
        backends are expected to rewrite any internally stored deltas such
        that they no longer reference the deleted content.
        """

    def getstrippoint(minlink):
        """Find the minimum revision that must be stripped to strip a linkrev.

        Returns a 2-tuple containing the minimum revision number and a set
        of all revisions numbers that would be broken by this strip.

        TODO this is highly revlog centric and should be abstracted into
        a higher-level deletion API. ``repair.strip()`` relies on this.
        """

    def strip(minlink, transaction):
        """Remove storage of items starting at a linkrev.

        This uses ``getstrippoint()`` to determine the first node to remove.
        Then it effectively truncates storage for all revisions after that.

        TODO this is highly revlog centric and should be abstracted into a
        higher-level deletion API.
        """
class ifilestorage(ifileindex, ifiledata, ifilemutation):
    """Complete storage interface for a single tracked file."""

    def files():
        """Obtain paths that are backing storage for this file.

        TODO this is used heavily by verify code and there should probably
        be a better API for that.
        """

    def storageinfo(
        exclusivefiles=False,
        sharedfiles=False,
        revisionscount=False,
        trackedsize=False,
        storedsize=False,
    ):
        """Obtain information about storage for this file's data.

        Returns a dict describing storage for this tracked path. The keys
        in the dict map to arguments of the same. The arguments are bools
        indicating whether to calculate and obtain that data.

        exclusivefiles
           Iterable of (vfs, path) describing files that are exclusively
           used to back storage for this tracked path.

        sharedfiles
           Iterable of (vfs, path) describing files that are used to back
           storage for this tracked path. Those files may also provide storage
           for other stored entities.

        revisionscount
           Number of revisions available for retrieval.

        trackedsize
           Total size in bytes of all tracked revisions. This is a sum of the
           length of the fulltext of all revisions.

        storedsize
           Total size in bytes used to store data for all tracked revisions.
           This is commonly less than ``trackedsize`` due to internal usage
           of deltas rather than fulltext revisions.

        Not all storage backends may support all queries are have a reasonable
        value to use. In that case, the value should be set to ``None`` and
        callers are expected to handle this special value.
        """

    def verifyintegrity(state):
        """Verifies the integrity of file storage.

        ``state`` is a dict holding state of the verifier process. It can be
        used to communicate data between invocations of multiple storage
        primitives.

        If individual revisions cannot have their revision content resolved,
        the method is expected to set the ``skipread`` key to a set of nodes
        that encountered problems. If set, the method can also add the node(s)
        to ``safe_renamed`` in order to indicate nodes that may perform the
        rename checks with currently accessible data.

        The method yields objects conforming to the ``iverifyproblem``
        interface.
        """
class idirs(interfaceutil.Interface):
    """Interface representing a collection of directories from paths.

    This interface is essentially a derived data structure representing
    directories from a collection of paths.
    """

    def addpath(path):
        """Add a path to the collection.

        All directories in the path will be added to the collection.
        """

    def delpath(path):
        """Remove a path from the collection.

        If the removal was the last path in a particular directory, the
        directory is removed from the collection.
        """

    def __iter__():
        """Iterate over the directories in this collection of paths."""

    def __contains__(path):
        """Whether a specific directory is in this collection."""
935 class imanifestdict(interfaceutil.Interface):
935 class imanifestdict(interfaceutil.Interface):
936 """Interface representing a manifest data structure.
936 """Interface representing a manifest data structure.
937
937
938 A manifest is effectively a dict mapping paths to entries. Each entry
938 A manifest is effectively a dict mapping paths to entries. Each entry
939 consists of a binary node and extra flags affecting that entry.
939 consists of a binary node and extra flags affecting that entry.
940 """
940 """
941
941
942 def __getitem__(path):
942 def __getitem__(path):
943 """Returns the binary node value for a path in the manifest.
943 """Returns the binary node value for a path in the manifest.
944
944
945 Raises ``KeyError`` if the path does not exist in the manifest.
945 Raises ``KeyError`` if the path does not exist in the manifest.
946
946
947 Equivalent to ``self.find(path)[0]``.
947 Equivalent to ``self.find(path)[0]``.
948 """
948 """
949
949
950 def find(path):
950 def find(path):
951 """Returns the entry for a path in the manifest.
951 """Returns the entry for a path in the manifest.
952
952
953 Returns a 2-tuple of (node, flags).
953 Returns a 2-tuple of (node, flags).
954
954
955 Raises ``KeyError`` if the path does not exist in the manifest.
955 Raises ``KeyError`` if the path does not exist in the manifest.
956 """
956 """
957
957
958 def __len__():
958 def __len__():
959 """Return the number of entries in the manifest."""
959 """Return the number of entries in the manifest."""
960
960
961 def __nonzero__():
961 def __nonzero__():
962 """Returns True if the manifest has entries, False otherwise."""
962 """Returns True if the manifest has entries, False otherwise."""
963
963
964 __bool__ = __nonzero__
964 __bool__ = __nonzero__
965
965
966 def __setitem__(path, node):
966 def __setitem__(path, node):
967 """Define the node value for a path in the manifest.
967 """Define the node value for a path in the manifest.
968
968
969 If the path is already in the manifest, its flags will be copied to
969 If the path is already in the manifest, its flags will be copied to
970 the new entry.
970 the new entry.
971 """
971 """
972
972
973 def __contains__(path):
973 def __contains__(path):
974 """Whether a path exists in the manifest."""
974 """Whether a path exists in the manifest."""
975
975
976 def __delitem__(path):
976 def __delitem__(path):
977 """Remove a path from the manifest.
977 """Remove a path from the manifest.
978
978
979 Raises ``KeyError`` if the path is not in the manifest.
979 Raises ``KeyError`` if the path is not in the manifest.
980 """
980 """
981
981
982 def __iter__():
982 def __iter__():
983 """Iterate over paths in the manifest."""
983 """Iterate over paths in the manifest."""
984
984
985 def iterkeys():
985 def iterkeys():
986 """Iterate over paths in the manifest."""
986 """Iterate over paths in the manifest."""
987
987
988 def keys():
988 def keys():
989 """Obtain a list of paths in the manifest."""
989 """Obtain a list of paths in the manifest."""
990
990
991 def filesnotin(other, match=None):
991 def filesnotin(other, match=None):
992 """Obtain the set of paths in this manifest but not in another.
992 """Obtain the set of paths in this manifest but not in another.
993
993
994 ``match`` is an optional matcher function to be applied to both
994 ``match`` is an optional matcher function to be applied to both
995 manifests.
995 manifests.
996
996
997 Returns a set of paths.
997 Returns a set of paths.
998 """
998 """
999
999
1000 def dirs():
1000 def dirs():
1001 """Returns an object implementing the ``idirs`` interface."""
1001 """Returns an object implementing the ``idirs`` interface."""
1002
1002
1003 def hasdir(dir):
1003 def hasdir(dir):
1004 """Returns a bool indicating if a directory is in this manifest."""
1004 """Returns a bool indicating if a directory is in this manifest."""
1005
1005
1006 def walk(match):
1006 def walk(match):
1007 """Generator of paths in manifest satisfying a matcher.
1007 """Generator of paths in manifest satisfying a matcher.
1008
1008
1009 If the matcher has explicit files listed and they don't exist in
1009 If the matcher has explicit files listed and they don't exist in
1010 the manifest, ``match.bad()`` is called for each missing file.
1010 the manifest, ``match.bad()`` is called for each missing file.
1011 """
1011 """
1012
1012
1013 def diff(other, match=None, clean=False):
1013 def diff(other, match=None, clean=False):
1014 """Find differences between this manifest and another.
1014 """Find differences between this manifest and another.
1015
1015
1016 This manifest is compared to ``other``.
1016 This manifest is compared to ``other``.
1017
1017
1018 If ``match`` is provided, the two manifests are filtered against this
1018 If ``match`` is provided, the two manifests are filtered against this
1019 matcher and only entries satisfying the matcher are compared.
1019 matcher and only entries satisfying the matcher are compared.
1020
1020
1021 If ``clean`` is True, unchanged files are included in the returned
1021 If ``clean`` is True, unchanged files are included in the returned
1022 object.
1022 object.
1023
1023
1024 Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
1024 Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
1025 the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
1025 the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
1026 represents the node and flags for this manifest and ``(node2, flag2)``
1026 represents the node and flags for this manifest and ``(node2, flag2)``
1027 are the same for the other manifest.
1027 are the same for the other manifest.
1028 """
1028 """
1029
1029
1030 def setflag(path, flag):
1030 def setflag(path, flag):
1031 """Set the flag value for a given path.
1031 """Set the flag value for a given path.
1032
1032
1033 Raises ``KeyError`` if the path is not already in the manifest.
1033 Raises ``KeyError`` if the path is not already in the manifest.
1034 """
1034 """
1035
1035
1036 def get(path, default=None):
1036 def get(path, default=None):
1037 """Obtain the node value for a path or a default value if missing."""
1037 """Obtain the node value for a path or a default value if missing."""
1038
1038
1039 def flags(path):
1039 def flags(path):
1040 """Return the flags value for a path (default: empty bytestring)."""
1040 """Return the flags value for a path (default: empty bytestring)."""
1041
1041
1042 def copy():
1042 def copy():
1043 """Return a copy of this manifest."""
1043 """Return a copy of this manifest."""
1044
1044
1045 def items():
1045 def items():
1046 """Returns an iterable of (path, node) for items in this manifest."""
1046 """Returns an iterable of (path, node) for items in this manifest."""
1047
1047
1048 def iteritems():
1048 def iteritems():
1049 """Identical to items()."""
1049 """Identical to items()."""
1050
1050
1051 def iterentries():
1051 def iterentries():
1052 """Returns an iterable of (path, node, flags) for this manifest.
1052 """Returns an iterable of (path, node, flags) for this manifest.
1053
1053
1054 Similar to ``iteritems()`` except items are a 3-tuple and include
1054 Similar to ``iteritems()`` except items are a 3-tuple and include
1055 flags.
1055 flags.
1056 """
1056 """
1057
1057
1058 def text():
1058 def text():
1059 """Obtain the raw data representation for this manifest.
1059 """Obtain the raw data representation for this manifest.
1060
1060
1061 Result is used to create a manifest revision.
1061 Result is used to create a manifest revision.
1062 """
1062 """
1063
1063
1064 def fastdelta(base, changes):
1064 def fastdelta(base, changes):
1065 """Obtain a delta between this manifest and another given changes.
1065 """Obtain a delta between this manifest and another given changes.
1066
1066
1067 ``base`` in the raw data representation for another manifest.
1067 ``base`` in the raw data representation for another manifest.
1068
1068
1069 ``changes`` is an iterable of ``(path, to_delete)``.
1069 ``changes`` is an iterable of ``(path, to_delete)``.
1070
1070
1071 Returns a 2-tuple containing ``bytearray(self.text())`` and the
1071 Returns a 2-tuple containing ``bytearray(self.text())`` and the
1072 delta between ``base`` and this manifest.
1072 delta between ``base`` and this manifest.
1073
1073
1074 If this manifest implementation can't support ``fastdelta()``,
1074 If this manifest implementation can't support ``fastdelta()``,
1075 raise ``mercurial.manifest.FastdeltaUnavailable``.
1075 raise ``mercurial.manifest.FastdeltaUnavailable``.
1076 """
1076 """
1077
1077
1078
1078
1079 class imanifestrevisionbase(interfaceutil.Interface):
1079 class imanifestrevisionbase(interfaceutil.Interface):
1080 """Base interface representing a single revision of a manifest.
1080 """Base interface representing a single revision of a manifest.
1081
1081
1082 Should not be used as a primary interface: should always be inherited
1082 Should not be used as a primary interface: should always be inherited
1083 as part of a larger interface.
1083 as part of a larger interface.
1084 """
1084 """
1085
1085
1086 def copy():
1086 def copy():
1087 """Obtain a copy of this manifest instance.
1087 """Obtain a copy of this manifest instance.
1088
1088
1089 Returns an object conforming to the ``imanifestrevisionwritable``
1089 Returns an object conforming to the ``imanifestrevisionwritable``
1090 interface. The instance will be associated with the same
1090 interface. The instance will be associated with the same
1091 ``imanifestlog`` collection as this instance.
1091 ``imanifestlog`` collection as this instance.
1092 """
1092 """
1093
1093
1094 def read():
1094 def read():
1095 """Obtain the parsed manifest data structure.
1095 """Obtain the parsed manifest data structure.
1096
1096
1097 The returned object conforms to the ``imanifestdict`` interface.
1097 The returned object conforms to the ``imanifestdict`` interface.
1098 """
1098 """
1099
1099
1100
1100
1101 class imanifestrevisionstored(imanifestrevisionbase):
1101 class imanifestrevisionstored(imanifestrevisionbase):
1102 """Interface representing a manifest revision committed to storage."""
1102 """Interface representing a manifest revision committed to storage."""
1103
1103
1104 def node():
1104 def node():
1105 """The binary node for this manifest."""
1105 """The binary node for this manifest."""
1106
1106
1107 parents = interfaceutil.Attribute(
1107 parents = interfaceutil.Attribute(
1108 """List of binary nodes that are parents for this manifest revision."""
1108 """List of binary nodes that are parents for this manifest revision."""
1109 )
1109 )
1110
1110
1111 def readdelta(shallow=False):
1111 def readdelta(shallow=False):
1112 """Obtain the manifest data structure representing changes from parent.
1112 """Obtain the manifest data structure representing changes from parent.
1113
1113
1114 This manifest is compared to its 1st parent. A new manifest representing
1114 This manifest is compared to its 1st parent. A new manifest representing
1115 those differences is constructed.
1115 those differences is constructed.
1116
1116
1117 The returned object conforms to the ``imanifestdict`` interface.
1117 The returned object conforms to the ``imanifestdict`` interface.
1118 """
1118 """
1119
1119
1120 def readfast(shallow=False):
1120 def readfast(shallow=False):
1121 """Calls either ``read()`` or ``readdelta()``.
1121 """Calls either ``read()`` or ``readdelta()``.
1122
1122
1123 The faster of the two options is called.
1123 The faster of the two options is called.
1124 """
1124 """
1125
1125
1126 def find(key):
1126 def find(key):
1127 """Calls self.read().find(key)``.
1127 """Calls self.read().find(key)``.
1128
1128
1129 Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
1129 Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
1130 """
1130 """
1131
1131
1132
1132
1133 class imanifestrevisionwritable(imanifestrevisionbase):
1133 class imanifestrevisionwritable(imanifestrevisionbase):
1134 """Interface representing a manifest revision that can be committed."""
1134 """Interface representing a manifest revision that can be committed."""
1135
1135
1136 def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
1136 def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
1137 """Add this revision to storage.
1137 """Add this revision to storage.
1138
1138
1139 Takes a transaction object, the changeset revision number it will
1139 Takes a transaction object, the changeset revision number it will
1140 be associated with, its parent nodes, and lists of added and
1140 be associated with, its parent nodes, and lists of added and
1141 removed paths.
1141 removed paths.
1142
1142
1143 If match is provided, storage can choose not to inspect or write out
1143 If match is provided, storage can choose not to inspect or write out
1144 items that do not match. Storage is still required to be able to provide
1144 items that do not match. Storage is still required to be able to provide
1145 the full manifest in the future for any directories written (these
1145 the full manifest in the future for any directories written (these
1146 manifests should not be "narrowed on disk").
1146 manifests should not be "narrowed on disk").
1147
1147
1148 Returns the binary node of the created revision.
1148 Returns the binary node of the created revision.
1149 """
1149 """
1150
1150
1151
1151
1152 class imanifeststorage(interfaceutil.Interface):
1152 class imanifeststorage(interfaceutil.Interface):
1153 """Storage interface for manifest data."""
1153 """Storage interface for manifest data."""
1154
1154
1155 nodeconstants = interfaceutil.Attribute(
1155 nodeconstants = interfaceutil.Attribute(
1156 """nodeconstants used by the current repository."""
1156 """nodeconstants used by the current repository."""
1157 )
1157 )
1158
1158
1159 tree = interfaceutil.Attribute(
1159 tree = interfaceutil.Attribute(
1160 """The path to the directory this manifest tracks.
1160 """The path to the directory this manifest tracks.
1161
1161
1162 The empty bytestring represents the root manifest.
1162 The empty bytestring represents the root manifest.
1163 """
1163 """
1164 )
1164 )
1165
1165
1166 index = interfaceutil.Attribute(
1166 index = interfaceutil.Attribute(
1167 """An ``ifilerevisionssequence`` instance."""
1167 """An ``ifilerevisionssequence`` instance."""
1168 )
1168 )
1169
1169
1170 indexfile = interfaceutil.Attribute(
1170 indexfile = interfaceutil.Attribute(
1171 """Path of revlog index file.
1171 """Path of revlog index file.
1172
1172
1173 TODO this is revlog specific and should not be exposed.
1173 TODO this is revlog specific and should not be exposed.
1174 """
1174 """
1175 )
1175 )
1176
1176
1177 opener = interfaceutil.Attribute(
1177 opener = interfaceutil.Attribute(
1178 """VFS opener to use to access underlying files used for storage.
1178 """VFS opener to use to access underlying files used for storage.
1179
1179
1180 TODO this is revlog specific and should not be exposed.
1180 TODO this is revlog specific and should not be exposed.
1181 """
1181 """
1182 )
1182 )
1183
1183
1184 version = interfaceutil.Attribute(
1184 version = interfaceutil.Attribute(
1185 """Revlog version number.
1185 """Revlog version number.
1186
1186
1187 TODO this is revlog specific and should not be exposed.
1187 TODO this is revlog specific and should not be exposed.
1188 """
1188 """
1189 )
1189 )
1190
1190
1191 _generaldelta = interfaceutil.Attribute(
1191 _generaldelta = interfaceutil.Attribute(
1192 """Whether generaldelta storage is being used.
1192 """Whether generaldelta storage is being used.
1193
1193
1194 TODO this is revlog specific and should not be exposed.
1194 TODO this is revlog specific and should not be exposed.
1195 """
1195 """
1196 )
1196 )
1197
1197
1198 fulltextcache = interfaceutil.Attribute(
1198 fulltextcache = interfaceutil.Attribute(
1199 """Dict with cache of fulltexts.
1199 """Dict with cache of fulltexts.
1200
1200
1201 TODO this doesn't feel appropriate for the storage interface.
1201 TODO this doesn't feel appropriate for the storage interface.
1202 """
1202 """
1203 )
1203 )
1204
1204
1205 def __len__():
1205 def __len__():
1206 """Obtain the number of revisions stored for this manifest."""
1206 """Obtain the number of revisions stored for this manifest."""
1207
1207
1208 def __iter__():
1208 def __iter__():
1209 """Iterate over revision numbers for this manifest."""
1209 """Iterate over revision numbers for this manifest."""
1210
1210
1211 def rev(node):
1211 def rev(node):
1212 """Obtain the revision number given a binary node.
1212 """Obtain the revision number given a binary node.
1213
1213
1214 Raises ``error.LookupError`` if the node is not known.
1214 Raises ``error.LookupError`` if the node is not known.
1215 """
1215 """
1216
1216
1217 def node(rev):
1217 def node(rev):
1218 """Obtain the node value given a revision number.
1218 """Obtain the node value given a revision number.
1219
1219
1220 Raises ``error.LookupError`` if the revision is not known.
1220 Raises ``error.LookupError`` if the revision is not known.
1221 """
1221 """
1222
1222
1223 def lookup(value):
1223 def lookup(value):
1224 """Attempt to resolve a value to a node.
1224 """Attempt to resolve a value to a node.
1225
1225
1226 Value can be a binary node, hex node, revision number, or a bytes
1226 Value can be a binary node, hex node, revision number, or a bytes
1227 that can be converted to an integer.
1227 that can be converted to an integer.
1228
1228
1229 Raises ``error.LookupError`` if a ndoe could not be resolved.
1229 Raises ``error.LookupError`` if a ndoe could not be resolved.
1230 """
1230 """
1231
1231
1232 def parents(node):
1232 def parents(node):
1233 """Returns a 2-tuple of parent nodes for a node.
1233 """Returns a 2-tuple of parent nodes for a node.
1234
1234
1235 Values will be ``nullid`` if the parent is empty.
1235 Values will be ``nullid`` if the parent is empty.
1236 """
1236 """
1237
1237
1238 def parentrevs(rev):
1238 def parentrevs(rev):
1239 """Like parents() but operates on revision numbers."""
1239 """Like parents() but operates on revision numbers."""
1240
1240
1241 def linkrev(rev):
1241 def linkrev(rev):
1242 """Obtain the changeset revision number a revision is linked to."""
1242 """Obtain the changeset revision number a revision is linked to."""
1243
1243
1244 def revision(node, _df=None, raw=False):
1244 def revision(node, _df=None, raw=False):
1245 """Obtain fulltext data for a node."""
1245 """Obtain fulltext data for a node."""
1246
1246
1247 def rawdata(node, _df=None):
1247 def rawdata(node, _df=None):
1248 """Obtain raw data for a node."""
1248 """Obtain raw data for a node."""
1249
1249
1250 def revdiff(rev1, rev2):
1250 def revdiff(rev1, rev2):
1251 """Obtain a delta between two revision numbers.
1251 """Obtain a delta between two revision numbers.
1252
1252
1253 The returned data is the result of ``bdiff.bdiff()`` on the raw
1253 The returned data is the result of ``bdiff.bdiff()`` on the raw
1254 revision data.
1254 revision data.
1255 """
1255 """
1256
1256
1257 def cmp(node, fulltext):
1257 def cmp(node, fulltext):
1258 """Compare fulltext to another revision.
1258 """Compare fulltext to another revision.
1259
1259
1260 Returns True if the fulltext is different from what is stored.
1260 Returns True if the fulltext is different from what is stored.
1261 """
1261 """
1262
1262
1263 def emitrevisions(
1263 def emitrevisions(
1264 nodes,
1264 nodes,
1265 nodesorder=None,
1265 nodesorder=None,
1266 revisiondata=False,
1266 revisiondata=False,
1267 assumehaveparentrevisions=False,
1267 assumehaveparentrevisions=False,
1268 ):
1268 ):
1269 """Produce ``irevisiondelta`` describing revisions.
1269 """Produce ``irevisiondelta`` describing revisions.
1270
1270
1271 See the documentation for ``ifiledata`` for more.
1271 See the documentation for ``ifiledata`` for more.
1272 """
1272 """
1273
1273
1274 def addgroup(
1274 def addgroup(
1275 deltas,
1275 deltas,
1276 linkmapper,
1276 linkmapper,
1277 transaction,
1277 transaction,
1278 addrevisioncb=None,
1278 addrevisioncb=None,
1279 duplicaterevisioncb=None,
1279 duplicaterevisioncb=None,
1280 ):
1280 ):
1281 """Process a series of deltas for storage.
1281 """Process a series of deltas for storage.
1282
1282
1283 See the documentation in ``ifilemutation`` for more.
1283 See the documentation in ``ifilemutation`` for more.
1284 """
1284 """
1285
1285
1286 def rawsize(rev):
1286 def rawsize(rev):
1287 """Obtain the size of tracked data.
1287 """Obtain the size of tracked data.
1288
1288
1289 Is equivalent to ``len(m.rawdata(node))``.
1289 Is equivalent to ``len(m.rawdata(node))``.
1290
1290
1291 TODO this method is only used by upgrade code and may be removed.
1291 TODO this method is only used by upgrade code and may be removed.
1292 """
1292 """
1293
1293
1294 def getstrippoint(minlink):
1294 def getstrippoint(minlink):
1295 """Find minimum revision that must be stripped to strip a linkrev.
1295 """Find minimum revision that must be stripped to strip a linkrev.
1296
1296
1297 See the documentation in ``ifilemutation`` for more.
1297 See the documentation in ``ifilemutation`` for more.
1298 """
1298 """
1299
1299
1300 def strip(minlink, transaction):
1300 def strip(minlink, transaction):
1301 """Remove storage of items starting at a linkrev.
1301 """Remove storage of items starting at a linkrev.
1302
1302
1303 See the documentation in ``ifilemutation`` for more.
1303 See the documentation in ``ifilemutation`` for more.
1304 """
1304 """
1305
1305
1306 def checksize():
1306 def checksize():
1307 """Obtain the expected sizes of backing files.
1307 """Obtain the expected sizes of backing files.
1308
1308
1309 TODO this is used by verify and it should not be part of the interface.
1309 TODO this is used by verify and it should not be part of the interface.
1310 """
1310 """
1311
1311
1312 def files():
1312 def files():
1313 """Obtain paths that are backing storage for this manifest.
1313 """Obtain paths that are backing storage for this manifest.
1314
1314
1315 TODO this is used by verify and there should probably be a better API
1315 TODO this is used by verify and there should probably be a better API
1316 for this functionality.
1316 for this functionality.
1317 """
1317 """
1318
1318
1319 def deltaparent(rev):
1319 def deltaparent(rev):
1320 """Obtain the revision that a revision is delta'd against.
1320 """Obtain the revision that a revision is delta'd against.
1321
1321
1322 TODO delta encoding is an implementation detail of storage and should
1322 TODO delta encoding is an implementation detail of storage and should
1323 not be exposed to the storage interface.
1323 not be exposed to the storage interface.
1324 """
1324 """
1325
1325
1326 def clone(tr, dest, **kwargs):
1326 def clone(tr, dest, **kwargs):
1327 """Clone this instance to another."""
1327 """Clone this instance to another."""
1328
1328
1329 def clearcaches(clear_persisted_data=False):
1329 def clearcaches(clear_persisted_data=False):
1330 """Clear any caches associated with this instance."""
1330 """Clear any caches associated with this instance."""
1331
1331
1332 def dirlog(d):
1332 def dirlog(d):
1333 """Obtain a manifest storage instance for a tree."""
1333 """Obtain a manifest storage instance for a tree."""
1334
1334
1335 def add(
1335 def add(
1336 m, transaction, link, p1, p2, added, removed, readtree=None, match=None
1336 m, transaction, link, p1, p2, added, removed, readtree=None, match=None
1337 ):
1337 ):
1338 """Add a revision to storage.
1338 """Add a revision to storage.
1339
1339
1340 ``m`` is an object conforming to ``imanifestdict``.
1340 ``m`` is an object conforming to ``imanifestdict``.
1341
1341
1342 ``link`` is the linkrev revision number.
1342 ``link`` is the linkrev revision number.
1343
1343
1344 ``p1`` and ``p2`` are the parent revision numbers.
1344 ``p1`` and ``p2`` are the parent revision numbers.
1345
1345
1346 ``added`` and ``removed`` are iterables of added and removed paths,
1346 ``added`` and ``removed`` are iterables of added and removed paths,
1347 respectively.
1347 respectively.
1348
1348
1349 ``readtree`` is a function that can be used to read the child tree(s)
1349 ``readtree`` is a function that can be used to read the child tree(s)
1350 when recursively writing the full tree structure when using
1350 when recursively writing the full tree structure when using
1351 treemanifets.
1351 treemanifets.
1352
1352
1353 ``match`` is a matcher that can be used to hint to storage that not all
1353 ``match`` is a matcher that can be used to hint to storage that not all
1354 paths must be inspected; this is an optimization and can be safely
1354 paths must be inspected; this is an optimization and can be safely
1355 ignored. Note that the storage must still be able to reproduce a full
1355 ignored. Note that the storage must still be able to reproduce a full
1356 manifest including files that did not match.
1356 manifest including files that did not match.
1357 """
1357 """
1358
1358
1359 def storageinfo(
1359 def storageinfo(
1360 exclusivefiles=False,
1360 exclusivefiles=False,
1361 sharedfiles=False,
1361 sharedfiles=False,
1362 revisionscount=False,
1362 revisionscount=False,
1363 trackedsize=False,
1363 trackedsize=False,
1364 storedsize=False,
1364 storedsize=False,
1365 ):
1365 ):
1366 """Obtain information about storage for this manifest's data.
1366 """Obtain information about storage for this manifest's data.
1367
1367
1368 See ``ifilestorage.storageinfo()`` for a description of this method.
1368 See ``ifilestorage.storageinfo()`` for a description of this method.
1369 This one behaves the same way, except for manifest data.
1369 This one behaves the same way, except for manifest data.
1370 """
1370 """
1371
1371
1372
1372
1373 class imanifestlog(interfaceutil.Interface):
1373 class imanifestlog(interfaceutil.Interface):
1374 """Interface representing a collection of manifest snapshots.
1374 """Interface representing a collection of manifest snapshots.
1375
1375
1376 Represents the root manifest in a repository.
1376 Represents the root manifest in a repository.
1377
1377
1378 Also serves as a means to access nested tree manifests and to cache
1378 Also serves as a means to access nested tree manifests and to cache
1379 tree manifests.
1379 tree manifests.
1380 """
1380 """
1381
1381
1382 nodeconstants = interfaceutil.Attribute(
1382 nodeconstants = interfaceutil.Attribute(
1383 """nodeconstants used by the current repository."""
1383 """nodeconstants used by the current repository."""
1384 )
1384 )
1385
1385
1386 def __getitem__(node):
1386 def __getitem__(node):
1387 """Obtain a manifest instance for a given binary node.
1387 """Obtain a manifest instance for a given binary node.
1388
1388
1389 Equivalent to calling ``self.get('', node)``.
1389 Equivalent to calling ``self.get('', node)``.
1390
1390
1391 The returned object conforms to the ``imanifestrevisionstored``
1391 The returned object conforms to the ``imanifestrevisionstored``
1392 interface.
1392 interface.
1393 """
1393 """
1394
1394
1395 def get(tree, node, verify=True):
1395 def get(tree, node, verify=True):
1396 """Retrieve the manifest instance for a given directory and binary node.
1396 """Retrieve the manifest instance for a given directory and binary node.
1397
1397
1398 ``node`` always refers to the node of the root manifest (which will be
1398 ``node`` always refers to the node of the root manifest (which will be
1399 the only manifest if flat manifests are being used).
1399 the only manifest if flat manifests are being used).
1400
1400
1401 If ``tree`` is the empty string, the root manifest is returned.
1401 If ``tree`` is the empty string, the root manifest is returned.
1402 Otherwise the manifest for the specified directory will be returned
1402 Otherwise the manifest for the specified directory will be returned
1403 (requires tree manifests).
1403 (requires tree manifests).
1404
1404
1405 If ``verify`` is True, ``LookupError`` is raised if the node is not
1405 If ``verify`` is True, ``LookupError`` is raised if the node is not
1406 known.
1406 known.
1407
1407
1408 The returned object conforms to the ``imanifestrevisionstored``
1408 The returned object conforms to the ``imanifestrevisionstored``
1409 interface.
1409 interface.
1410 """
1410 """
1411
1411
1412 def getstorage(tree):
1412 def getstorage(tree):
1413 """Retrieve an interface to storage for a particular tree.
1413 """Retrieve an interface to storage for a particular tree.
1414
1414
1415 If ``tree`` is the empty bytestring, storage for the root manifest will
1415 If ``tree`` is the empty bytestring, storage for the root manifest will
1416 be returned. Otherwise storage for a tree manifest is returned.
1416 be returned. Otherwise storage for a tree manifest is returned.
1417
1417
1418 TODO formalize interface for returned object.
1418 TODO formalize interface for returned object.
1419 """
1419 """
1420
1420
1421 def clearcaches():
1421 def clearcaches():
1422 """Clear caches associated with this collection."""
1422 """Clear caches associated with this collection."""
1423
1423
1424 def rev(node):
1424 def rev(node):
1425 """Obtain the revision number for a binary node.
1425 """Obtain the revision number for a binary node.
1426
1426
1427 Raises ``error.LookupError`` if the node is not known.
1427 Raises ``error.LookupError`` if the node is not known.
1428 """
1428 """
1429
1429
1430 def update_caches(transaction):
1430 def update_caches(transaction):
1431 """update whatever cache are relevant for the used storage."""
1431 """update whatever cache are relevant for the used storage."""
1432
1432
1433
1433
1434 class ilocalrepositoryfilestorage(interfaceutil.Interface):
1434 class ilocalrepositoryfilestorage(interfaceutil.Interface):
1435 """Local repository sub-interface providing access to tracked file storage.
1435 """Local repository sub-interface providing access to tracked file storage.
1436
1436
1437 This interface defines how a repository accesses storage for a single
1437 This interface defines how a repository accesses storage for a single
1438 tracked file path.
1438 tracked file path.
1439 """
1439 """
1440
1440
1441 def file(f):
1441 def file(f):
1442 """Obtain a filelog for a tracked path.
1442 """Obtain a filelog for a tracked path.
1443
1443
1444 The returned type conforms to the ``ifilestorage`` interface.
1444 The returned type conforms to the ``ifilestorage`` interface.
1445 """
1445 """
1446
1446
1447
1447
1448 class ilocalrepositorymain(interfaceutil.Interface):
1448 class ilocalrepositorymain(interfaceutil.Interface):
1449 """Main interface for local repositories.
1449 """Main interface for local repositories.
1450
1450
1451 This currently captures the reality of things - not how things should be.
1451 This currently captures the reality of things - not how things should be.
1452 """
1452 """
1453
1453
1454 nodeconstants = interfaceutil.Attribute(
1454 nodeconstants = interfaceutil.Attribute(
1455 """Constant nodes matching the hash function used by the repository."""
1455 """Constant nodes matching the hash function used by the repository."""
1456 )
1456 )
1457 nullid = interfaceutil.Attribute(
1457 nullid = interfaceutil.Attribute(
1458 """null revision for the hash function used by the repository."""
1458 """null revision for the hash function used by the repository."""
1459 )
1459 )
1460
1460
1461 supportedformats = interfaceutil.Attribute(
1461 supportedformats = interfaceutil.Attribute(
1462 """Set of requirements that apply to stream clone.
1462 """Set of requirements that apply to stream clone.
1463
1463
1464 This is actually a class attribute and is shared among all instances.
1464 This is actually a class attribute and is shared among all instances.
1465 """
1465 """
1466 )
1466 )
1467
1467
1468 supported = interfaceutil.Attribute(
1468 supported = interfaceutil.Attribute(
1469 """Set of requirements that this repo is capable of opening."""
1469 """Set of requirements that this repo is capable of opening."""
1470 )
1470 )
1471
1471
1472 requirements = interfaceutil.Attribute(
1472 requirements = interfaceutil.Attribute(
1473 """Set of requirements this repo uses."""
1473 """Set of requirements this repo uses."""
1474 )
1474 )
1475
1475
1476 features = interfaceutil.Attribute(
1476 features = interfaceutil.Attribute(
1477 """Set of "features" this repository supports.
1477 """Set of "features" this repository supports.
1478
1478
1479 A "feature" is a loosely-defined term. It can refer to a feature
1479 A "feature" is a loosely-defined term. It can refer to a feature
1480 in the classical sense or can describe an implementation detail
1480 in the classical sense or can describe an implementation detail
1481 of the repository. For example, a ``readonly`` feature may denote
1481 of the repository. For example, a ``readonly`` feature may denote
1482 the repository as read-only. Or a ``revlogfilestore`` feature may
1482 the repository as read-only. Or a ``revlogfilestore`` feature may
1483 denote that the repository is using revlogs for file storage.
1483 denote that the repository is using revlogs for file storage.
1484
1484
1485 The intent of features is to provide a machine-queryable mechanism
1485 The intent of features is to provide a machine-queryable mechanism
1486 for repo consumers to test for various repository characteristics.
1486 for repo consumers to test for various repository characteristics.
1487
1487
1488 Features are similar to ``requirements``. The main difference is that
1488 Features are similar to ``requirements``. The main difference is that
1489 requirements are stored on-disk and represent requirements to open the
1489 requirements are stored on-disk and represent requirements to open the
1490 repository. Features are more run-time capabilities of the repository
1490 repository. Features are more run-time capabilities of the repository
1491 and more granular capabilities (which may be derived from requirements).
1491 and more granular capabilities (which may be derived from requirements).
1492 """
1492 """
1493 )
1493 )
1494
1494
1495 filtername = interfaceutil.Attribute(
1495 filtername = interfaceutil.Attribute(
1496 """Name of the repoview that is active on this repo."""
1496 """Name of the repoview that is active on this repo."""
1497 )
1497 )
1498
1498
1499 wvfs = interfaceutil.Attribute(
1499 wvfs = interfaceutil.Attribute(
1500 """VFS used to access the working directory."""
1500 """VFS used to access the working directory."""
1501 )
1501 )
1502
1502
1503 vfs = interfaceutil.Attribute(
1503 vfs = interfaceutil.Attribute(
1504 """VFS rooted at the .hg directory.
1504 """VFS rooted at the .hg directory.
1505
1505
1506 Used to access repository data not in the store.
1506 Used to access repository data not in the store.
1507 """
1507 """
1508 )
1508 )
1509
1509
1510 svfs = interfaceutil.Attribute(
1510 svfs = interfaceutil.Attribute(
1511 """VFS rooted at the store.
1511 """VFS rooted at the store.
1512
1512
1513 Used to access repository data in the store. Typically .hg/store.
1513 Used to access repository data in the store. Typically .hg/store.
1514 But can point elsewhere if the store is shared.
1514 But can point elsewhere if the store is shared.
1515 """
1515 """
1516 )
1516 )
1517
1517
1518 root = interfaceutil.Attribute(
1518 root = interfaceutil.Attribute(
1519 """Path to the root of the working directory."""
1519 """Path to the root of the working directory."""
1520 )
1520 )
1521
1521
1522 path = interfaceutil.Attribute("""Path to the .hg directory.""")
1522 path = interfaceutil.Attribute("""Path to the .hg directory.""")
1523
1523
1524 origroot = interfaceutil.Attribute(
1524 origroot = interfaceutil.Attribute(
1525 """The filesystem path that was used to construct the repo."""
1525 """The filesystem path that was used to construct the repo."""
1526 )
1526 )
1527
1527
1528 auditor = interfaceutil.Attribute(
1528 auditor = interfaceutil.Attribute(
1529 """A pathauditor for the working directory.
1529 """A pathauditor for the working directory.
1530
1530
1531 This checks if a path refers to a nested repository.
1531 This checks if a path refers to a nested repository.
1532
1532
1533 Operates on the filesystem.
1533 Operates on the filesystem.
1534 """
1534 """
1535 )
1535 )
1536
1536
1537 nofsauditor = interfaceutil.Attribute(
1537 nofsauditor = interfaceutil.Attribute(
1538 """A pathauditor for the working directory.
1538 """A pathauditor for the working directory.
1539
1539
1540 This is like ``auditor`` except it doesn't do filesystem checks.
1540 This is like ``auditor`` except it doesn't do filesystem checks.
1541 """
1541 """
1542 )
1542 )
1543
1543
1544 baseui = interfaceutil.Attribute(
1544 baseui = interfaceutil.Attribute(
1545 """Original ui instance passed into constructor."""
1545 """Original ui instance passed into constructor."""
1546 )
1546 )
1547
1547
1548 ui = interfaceutil.Attribute("""Main ui instance for this instance.""")
1548 ui = interfaceutil.Attribute("""Main ui instance for this instance.""")
1549
1549
1550 sharedpath = interfaceutil.Attribute(
1550 sharedpath = interfaceutil.Attribute(
1551 """Path to the .hg directory of the repo this repo was shared from."""
1551 """Path to the .hg directory of the repo this repo was shared from."""
1552 )
1552 )
1553
1553
1554 store = interfaceutil.Attribute("""A store instance.""")
1554 store = interfaceutil.Attribute("""A store instance.""")
1555
1555
1556 spath = interfaceutil.Attribute("""Path to the store.""")
1556 spath = interfaceutil.Attribute("""Path to the store.""")
1557
1557
1558 sjoin = interfaceutil.Attribute("""Alias to self.store.join.""")
1558 sjoin = interfaceutil.Attribute("""Alias to self.store.join.""")
1559
1559
1560 cachevfs = interfaceutil.Attribute(
1560 cachevfs = interfaceutil.Attribute(
1561 """A VFS used to access the cache directory.
1561 """A VFS used to access the cache directory.
1562
1562
1563 Typically .hg/cache.
1563 Typically .hg/cache.
1564 """
1564 """
1565 )
1565 )
1566
1566
1567 wcachevfs = interfaceutil.Attribute(
1567 wcachevfs = interfaceutil.Attribute(
1568 """A VFS used to access the cache directory dedicated to working copy
1568 """A VFS used to access the cache directory dedicated to working copy
1569
1569
1570 Typically .hg/wcache.
1570 Typically .hg/wcache.
1571 """
1571 """
1572 )
1572 )
1573
1573
1574 filteredrevcache = interfaceutil.Attribute(
1574 filteredrevcache = interfaceutil.Attribute(
1575 """Holds sets of revisions to be filtered."""
1575 """Holds sets of revisions to be filtered."""
1576 )
1576 )
1577
1577
1578 names = interfaceutil.Attribute("""A ``namespaces`` instance.""")
1578 names = interfaceutil.Attribute("""A ``namespaces`` instance.""")
1579
1579
1580 filecopiesmode = interfaceutil.Attribute(
1580 filecopiesmode = interfaceutil.Attribute(
1581 """The way files copies should be dealt with in this repo."""
1581 """The way files copies should be dealt with in this repo."""
1582 )
1582 )
1583
1583
1584 def close():
1584 def close():
1585 """Close the handle on this repository."""
1585 """Close the handle on this repository."""
1586
1586
1587 def peer():
1587 def peer():
1588 """Obtain an object conforming to the ``peer`` interface."""
1588 """Obtain an object conforming to the ``peer`` interface."""
1589
1589
1590 def unfiltered():
1590 def unfiltered():
1591 """Obtain an unfiltered/raw view of this repo."""
1591 """Obtain an unfiltered/raw view of this repo."""
1592
1592
1593 def filtered(name, visibilityexceptions=None):
1593 def filtered(name, visibilityexceptions=None):
1594 """Obtain a named view of this repository."""
1594 """Obtain a named view of this repository."""
1595
1595
1596 obsstore = interfaceutil.Attribute("""A store of obsolescence data.""")
1596 obsstore = interfaceutil.Attribute("""A store of obsolescence data.""")
1597
1597
1598 changelog = interfaceutil.Attribute("""A handle on the changelog revlog.""")
1598 changelog = interfaceutil.Attribute("""A handle on the changelog revlog.""")
1599
1599
1600 manifestlog = interfaceutil.Attribute(
1600 manifestlog = interfaceutil.Attribute(
1601 """An instance conforming to the ``imanifestlog`` interface.
1601 """An instance conforming to the ``imanifestlog`` interface.
1602
1602
1603 Provides access to manifests for the repository.
1603 Provides access to manifests for the repository.
1604 """
1604 """
1605 )
1605 )
1606
1606
1607 dirstate = interfaceutil.Attribute("""Working directory state.""")
1607 dirstate = interfaceutil.Attribute("""Working directory state.""")
1608
1608
1609 narrowpats = interfaceutil.Attribute(
1609 narrowpats = interfaceutil.Attribute(
1610 """Matcher patterns for this repository's narrowspec."""
1610 """Matcher patterns for this repository's narrowspec."""
1611 )
1611 )
1612
1612
1613 def narrowmatch(match=None, includeexact=False):
1613 def narrowmatch(match=None, includeexact=False):
1614 """Obtain a matcher for the narrowspec."""
1614 """Obtain a matcher for the narrowspec."""
1615
1615
1616 def setnarrowpats(newincludes, newexcludes):
1616 def setnarrowpats(newincludes, newexcludes):
1617 """Define the narrowspec for this repository."""
1617 """Define the narrowspec for this repository."""
1618
1618
1619 def __getitem__(changeid):
1619 def __getitem__(changeid):
1620 """Try to resolve a changectx."""
1620 """Try to resolve a changectx."""
1621
1621
1622 def __contains__(changeid):
1622 def __contains__(changeid):
1623 """Whether a changeset exists."""
1623 """Whether a changeset exists."""
1624
1624
1625 def __nonzero__():
1625 def __nonzero__():
1626 """Always returns True."""
1626 """Always returns True."""
1627 return True
1627 return True
1628
1628
1629 __bool__ = __nonzero__
1629 __bool__ = __nonzero__
1630
1630
1631 def __len__():
1631 def __len__():
1632 """Returns the number of changesets in the repo."""
1632 """Returns the number of changesets in the repo."""
1633
1633
1634 def __iter__():
1634 def __iter__():
1635 """Iterate over revisions in the changelog."""
1635 """Iterate over revisions in the changelog."""
1636
1636
1637 def revs(expr, *args):
1637 def revs(expr, *args):
1638 """Evaluate a revset.
1638 """Evaluate a revset.
1639
1639
1640 Emits revisions.
1640 Emits revisions.
1641 """
1641 """
1642
1642
1643 def set(expr, *args):
1643 def set(expr, *args):
1644 """Evaluate a revset.
1644 """Evaluate a revset.
1645
1645
1646 Emits changectx instances.
1646 Emits changectx instances.
1647 """
1647 """
1648
1648
1649 def anyrevs(specs, user=False, localalias=None):
1649 def anyrevs(specs, user=False, localalias=None):
1650 """Find revisions matching one of the given revsets."""
1650 """Find revisions matching one of the given revsets."""
1651
1651
1652 def url():
1652 def url():
1653 """Returns a string representing the location of this repo."""
1653 """Returns a string representing the location of this repo."""
1654
1654
1655 def hook(name, throw=False, **args):
1655 def hook(name, throw=False, **args):
1656 """Call a hook."""
1656 """Call a hook."""
1657
1657
1658 def tags():
1658 def tags():
1659 """Return a mapping of tag to node."""
1659 """Return a mapping of tag to node."""
1660
1660
1661 def tagtype(tagname):
1661 def tagtype(tagname):
1662 """Return the type of a given tag."""
1662 """Return the type of a given tag."""
1663
1663
1664 def tagslist():
1664 def tagslist():
1665 """Return a list of tags ordered by revision."""
1665 """Return a list of tags ordered by revision."""
1666
1666
1667 def nodetags(node):
1667 def nodetags(node):
1668 """Return the tags associated with a node."""
1668 """Return the tags associated with a node."""
1669
1669
1670 def nodebookmarks(node):
1670 def nodebookmarks(node):
1671 """Return the list of bookmarks pointing to the specified node."""
1671 """Return the list of bookmarks pointing to the specified node."""
1672
1672
1673 def branchmap():
1673 def branchmap():
1674 """Return a mapping of branch to heads in that branch."""
1674 """Return a mapping of branch to heads in that branch."""
1675
1675
1676 def revbranchcache():
1676 def revbranchcache():
1677 pass
1677 pass
1678
1678
1679 def register_changeset(rev, changelogrevision):
1679 def register_changeset(rev, changelogrevision):
1680 """Extension point for caches for new nodes.
1680 """Extension point for caches for new nodes.
1681
1681
1682 Multiple consumers are expected to need parts of the changelogrevision,
1682 Multiple consumers are expected to need parts of the changelogrevision,
1683 so it is provided as optimization to avoid duplicate lookups. A simple
1683 so it is provided as optimization to avoid duplicate lookups. A simple
1684 cache would be fragile when other revisions are accessed, too."""
1684 cache would be fragile when other revisions are accessed, too."""
1685 pass
1685 pass
1686
1686
1687 def branchtip(branchtip, ignoremissing=False):
1687 def branchtip(branchtip, ignoremissing=False):
1688 """Return the tip node for a given branch."""
1688 """Return the tip node for a given branch."""
1689
1689
1690 def lookup(key):
1690 def lookup(key):
1691 """Resolve the node for a revision."""
1691 """Resolve the node for a revision."""
1692
1692
1693 def lookupbranch(key):
1693 def lookupbranch(key):
1694 """Look up the branch name of the given revision or branch name."""
1694 """Look up the branch name of the given revision or branch name."""
1695
1695
1696 def known(nodes):
1696 def known(nodes):
1697 """Determine whether a series of nodes is known.
1697 """Determine whether a series of nodes is known.
1698
1698
1699 Returns a list of bools.
1699 Returns a list of bools.
1700 """
1700 """
1701
1701
1702 def local():
1702 def local():
1703 """Whether the repository is local."""
1703 """Whether the repository is local."""
1704 return True
1704 return True
1705
1705
1706 def publishing():
1706 def publishing():
1707 """Whether the repository is a publishing repository."""
1707 """Whether the repository is a publishing repository."""
1708
1708
1709 def cancopy():
1709 def cancopy():
1710 pass
1710 pass
1711
1711
1712 def shared():
1712 def shared():
1713 """The type of shared repository or None."""
1713 """The type of shared repository or None."""
1714
1714
1715 def wjoin(f, *insidef):
1715 def wjoin(f, *insidef):
1716 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
1716 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
1717
1717
1718 def setparents(p1, p2):
1718 def setparents(p1, p2):
1719 """Set the parent nodes of the working directory."""
1719 """Set the parent nodes of the working directory."""
1720
1720
1721 def filectx(path, changeid=None, fileid=None):
1721 def filectx(path, changeid=None, fileid=None):
1722 """Obtain a filectx for the given file revision."""
1722 """Obtain a filectx for the given file revision."""
1723
1723
1724 def getcwd():
1724 def getcwd():
1725 """Obtain the current working directory from the dirstate."""
1725 """Obtain the current working directory from the dirstate."""
1726
1726
1727 def pathto(f, cwd=None):
1727 def pathto(f, cwd=None):
1728 """Obtain the relative path to a file."""
1728 """Obtain the relative path to a file."""
1729
1729
1730 def adddatafilter(name, fltr):
1730 def adddatafilter(name, fltr):
1731 pass
1731 pass
1732
1732
1733 def wread(filename):
1733 def wread(filename):
1734 """Read a file from wvfs, using data filters."""
1734 """Read a file from wvfs, using data filters."""
1735
1735
1736 def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
1736 def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
1737 """Write data to a file in the wvfs, using data filters."""
1737 """Write data to a file in the wvfs, using data filters."""
1738
1738
1739 def wwritedata(filename, data):
1739 def wwritedata(filename, data):
1740 """Resolve data for writing to the wvfs, using data filters."""
1740 """Resolve data for writing to the wvfs, using data filters."""
1741
1741
1742 def currenttransaction():
1742 def currenttransaction():
1743 """Obtain the current transaction instance or None."""
1743 """Obtain the current transaction instance or None."""
1744
1744
1745 def transaction(desc, report=None):
1745 def transaction(desc, report=None):
1746 """Open a new transaction to write to the repository."""
1746 """Open a new transaction to write to the repository."""
1747
1747
1748 def undofiles():
1748 def undofiles():
1749 """Returns a list of (vfs, path) for files to undo transactions."""
1749 """Returns a list of (vfs, path) for files to undo transactions."""
1750
1750
1751 def recover():
1751 def recover():
1752 """Roll back an interrupted transaction."""
1752 """Roll back an interrupted transaction."""
1753
1753
1754 def rollback(dryrun=False, force=False):
1754 def rollback(dryrun=False, force=False):
1755 """Undo the last transaction.
1755 """Undo the last transaction.
1756
1756
1757 DANGEROUS.
1757 DANGEROUS.
1758 """
1758 """
1759
1759
1760 def updatecaches(tr=None, full=False):
1760 def updatecaches(tr=None, full=False):
1761 """Warm repo caches."""
1761 """Warm repo caches."""
1762
1762
1763 def invalidatecaches():
1763 def invalidatecaches():
1764 """Invalidate cached data due to the repository mutating."""
1764 """Invalidate cached data due to the repository mutating."""
1765
1765
1766 def invalidatevolatilesets():
1766 def invalidatevolatilesets():
1767 pass
1767 pass
1768
1768
1769 def invalidatedirstate():
1769 def invalidatedirstate():
1770 """Invalidate the dirstate."""
1770 """Invalidate the dirstate."""
1771
1771
1772 def invalidate(clearfilecache=False):
1772 def invalidate(clearfilecache=False):
1773 pass
1773 pass
1774
1774
1775 def invalidateall():
1775 def invalidateall():
1776 pass
1776 pass
1777
1777
1778 def lock(wait=True):
1778 def lock(wait=True):
1779 """Lock the repository store and return a lock instance."""
1779 """Lock the repository store and return a lock instance."""
1780
1780
1781 def wlock(wait=True):
1781 def wlock(wait=True):
1782 """Lock the non-store parts of the repository."""
1782 """Lock the non-store parts of the repository."""
1783
1783
1784 def currentwlock():
1784 def currentwlock():
1785 """Return the wlock if it's held or None."""
1785 """Return the wlock if it's held or None."""
1786
1786
1787 def checkcommitpatterns(wctx, match, status, fail):
1787 def checkcommitpatterns(wctx, match, status, fail):
1788 pass
1788 pass
1789
1789
1790 def commit(
1790 def commit(
1791 text=b'',
1791 text=b'',
1792 user=None,
1792 user=None,
1793 date=None,
1793 date=None,
1794 match=None,
1794 match=None,
1795 force=False,
1795 force=False,
1796 editor=False,
1796 editor=False,
1797 extra=None,
1797 extra=None,
1798 ):
1798 ):
1799 """Add a new revision to the repository."""
1799 """Add a new revision to the repository."""
1800
1800
1801 def commitctx(ctx, error=False, origctx=None):
1801 def commitctx(ctx, error=False, origctx=None):
1802 """Commit a commitctx instance to the repository."""
1802 """Commit a commitctx instance to the repository."""
1803
1803
1804 def destroying():
1804 def destroying():
1805 """Inform the repository that nodes are about to be destroyed."""
1805 """Inform the repository that nodes are about to be destroyed."""
1806
1806
1807 def destroyed():
1807 def destroyed():
1808 """Inform the repository that nodes have been destroyed."""
1808 """Inform the repository that nodes have been destroyed."""
1809
1809
1810 def status(
1810 def status(
1811 node1=b'.',
1811 node1=b'.',
1812 node2=None,
1812 node2=None,
1813 match=None,
1813 match=None,
1814 ignored=False,
1814 ignored=False,
1815 clean=False,
1815 clean=False,
1816 unknown=False,
1816 unknown=False,
1817 listsubrepos=False,
1817 listsubrepos=False,
1818 ):
1818 ):
1819 """Convenience method to call repo[x].status()."""
1819 """Convenience method to call repo[x].status()."""
1820
1820
1821 def addpostdsstatus(ps):
1821 def addpostdsstatus(ps):
1822 pass
1822 pass
1823
1823
1824 def postdsstatus():
1824 def postdsstatus():
1825 pass
1825 pass
1826
1826
1827 def clearpostdsstatus():
1827 def clearpostdsstatus():
1828 pass
1828 pass
1829
1829
1830 def heads(start=None):
1830 def heads(start=None):
1831 """Obtain list of nodes that are DAG heads."""
1831 """Obtain list of nodes that are DAG heads."""
1832
1832
1833 def branchheads(branch=None, start=None, closed=False):
1833 def branchheads(branch=None, start=None, closed=False):
1834 pass
1834 pass
1835
1835
1836 def branches(nodes):
1836 def branches(nodes):
1837 pass
1837 pass
1838
1838
1839 def between(pairs):
1839 def between(pairs):
1840 pass
1840 pass
1841
1841
1842 def checkpush(pushop):
1842 def checkpush(pushop):
1843 pass
1843 pass
1844
1844
1845 prepushoutgoinghooks = interfaceutil.Attribute("""util.hooks instance.""")
1845 prepushoutgoinghooks = interfaceutil.Attribute("""util.hooks instance.""")
1846
1846
1847 def pushkey(namespace, key, old, new):
1847 def pushkey(namespace, key, old, new):
1848 pass
1848 pass
1849
1849
1850 def listkeys(namespace):
1850 def listkeys(namespace):
1851 pass
1851 pass
1852
1852
1853 def debugwireargs(one, two, three=None, four=None, five=None):
1853 def debugwireargs(one, two, three=None, four=None, five=None):
1854 pass
1854 pass
1855
1855
1856 def savecommitmessage(text):
1856 def savecommitmessage(text):
1857 pass
1857 pass
1858
1858
1859 def register_sidedata_computer(kind, category, keys, computer, flags):
1859 def register_sidedata_computer(
1860 kind, category, keys, computer, flags, replace=False
1861 ):
1860 pass
1862 pass
1861
1863
1862 def register_wanted_sidedata(category):
1864 def register_wanted_sidedata(category):
1863 pass
1865 pass
1864
1866
1865
1867
1866 class completelocalrepository(
1868 class completelocalrepository(
1867 ilocalrepositorymain, ilocalrepositoryfilestorage
1869 ilocalrepositorymain, ilocalrepositoryfilestorage
1868 ):
1870 ):
1869 """Complete interface for a local repository."""
1871 """Complete interface for a local repository."""
1870
1872
1871
1873
1872 class iwireprotocolcommandcacher(interfaceutil.Interface):
1874 class iwireprotocolcommandcacher(interfaceutil.Interface):
1873 """Represents a caching backend for wire protocol commands.
1875 """Represents a caching backend for wire protocol commands.
1874
1876
1875 Wire protocol version 2 supports transparent caching of many commands.
1877 Wire protocol version 2 supports transparent caching of many commands.
1876 To leverage this caching, servers can activate objects that cache
1878 To leverage this caching, servers can activate objects that cache
1877 command responses. Objects handle both cache writing and reading.
1879 command responses. Objects handle both cache writing and reading.
1878 This interface defines how that response caching mechanism works.
1880 This interface defines how that response caching mechanism works.
1879
1881
1880 Wire protocol version 2 commands emit a series of objects that are
1882 Wire protocol version 2 commands emit a series of objects that are
1881 serialized and sent to the client. The caching layer exists between
1883 serialized and sent to the client. The caching layer exists between
1882 the invocation of the command function and the sending of its output
1884 the invocation of the command function and the sending of its output
1883 objects to an output layer.
1885 objects to an output layer.
1884
1886
1885 Instances of this interface represent a binding to a cache that
1887 Instances of this interface represent a binding to a cache that
1886 can serve a response (in place of calling a command function) and/or
1888 can serve a response (in place of calling a command function) and/or
1887 write responses to a cache for subsequent use.
1889 write responses to a cache for subsequent use.
1888
1890
1889 When a command request arrives, the following happens with regards
1891 When a command request arrives, the following happens with regards
1890 to this interface:
1892 to this interface:
1891
1893
1892 1. The server determines whether the command request is cacheable.
1894 1. The server determines whether the command request is cacheable.
1893 2. If it is, an instance of this interface is spawned.
1895 2. If it is, an instance of this interface is spawned.
1894 3. The cacher is activated in a context manager (``__enter__`` is called).
1896 3. The cacher is activated in a context manager (``__enter__`` is called).
1895 4. A cache *key* for that request is derived. This will call the
1897 4. A cache *key* for that request is derived. This will call the
1896 instance's ``adjustcachekeystate()`` method so the derivation
1898 instance's ``adjustcachekeystate()`` method so the derivation
1897 can be influenced.
1899 can be influenced.
1898 5. The cacher is informed of the derived cache key via a call to
1900 5. The cacher is informed of the derived cache key via a call to
1899 ``setcachekey()``.
1901 ``setcachekey()``.
1900 6. The cacher's ``lookup()`` method is called to test for presence of
1902 6. The cacher's ``lookup()`` method is called to test for presence of
1901 the derived key in the cache.
1903 the derived key in the cache.
1902 7. If ``lookup()`` returns a hit, that cached result is used in place
1904 7. If ``lookup()`` returns a hit, that cached result is used in place
1903 of invoking the command function. ``__exit__`` is called and the instance
1905 of invoking the command function. ``__exit__`` is called and the instance
1904 is discarded.
1906 is discarded.
1905 8. The command function is invoked.
1907 8. The command function is invoked.
1906 9. ``onobject()`` is called for each object emitted by the command
1908 9. ``onobject()`` is called for each object emitted by the command
1907 function.
1909 function.
1908 10. After the final object is seen, ``onfinished()`` is called.
1910 10. After the final object is seen, ``onfinished()`` is called.
1909 11. ``__exit__`` is called to signal the end of use of the instance.
1911 11. ``__exit__`` is called to signal the end of use of the instance.
1910
1912
1911 Cache *key* derivation can be influenced by the instance.
1913 Cache *key* derivation can be influenced by the instance.
1912
1914
1913 Cache keys are initially derived by a deterministic representation of
1915 Cache keys are initially derived by a deterministic representation of
1914 the command request. This includes the command name, arguments, protocol
1916 the command request. This includes the command name, arguments, protocol
1915 version, etc. This initial key derivation is performed by CBOR-encoding a
1917 version, etc. This initial key derivation is performed by CBOR-encoding a
1916 data structure and feeding that output into a hasher.
1918 data structure and feeding that output into a hasher.
1917
1919
1918 Instances of this interface can influence this initial key derivation
1920 Instances of this interface can influence this initial key derivation
1919 via ``adjustcachekeystate()``.
1921 via ``adjustcachekeystate()``.
1920
1922
1921 The instance is informed of the derived cache key via a call to
1923 The instance is informed of the derived cache key via a call to
1922 ``setcachekey()``. The instance must store the key locally so it can
1924 ``setcachekey()``. The instance must store the key locally so it can
1923 be consulted on subsequent operations that may require it.
1925 be consulted on subsequent operations that may require it.
1924
1926
1925 When constructed, the instance has access to a callable that can be used
1927 When constructed, the instance has access to a callable that can be used
1926 for encoding response objects. This callable receives as its single
1928 for encoding response objects. This callable receives as its single
1927 argument an object emitted by a command function. It returns an iterable
1929 argument an object emitted by a command function. It returns an iterable
1928 of bytes chunks representing the encoded object. Unless the cacher is
1930 of bytes chunks representing the encoded object. Unless the cacher is
1929 caching native Python objects in memory or has a way of reconstructing
1931 caching native Python objects in memory or has a way of reconstructing
1930 the original Python objects, implementations typically call this function
1932 the original Python objects, implementations typically call this function
1931 to produce bytes from the output objects and then store those bytes in
1933 to produce bytes from the output objects and then store those bytes in
1932 the cache. When it comes time to re-emit those bytes, they are wrapped
1934 the cache. When it comes time to re-emit those bytes, they are wrapped
1933 in a ``wireprototypes.encodedresponse`` instance to tell the output
1935 in a ``wireprototypes.encodedresponse`` instance to tell the output
1934 layer that they are pre-encoded.
1936 layer that they are pre-encoded.
1935
1937
1936 When receiving the objects emitted by the command function, instances
1938 When receiving the objects emitted by the command function, instances
1937 can choose what to do with those objects. The simplest thing to do is
1939 can choose what to do with those objects. The simplest thing to do is
1938 re-emit the original objects. They will be forwarded to the output
1940 re-emit the original objects. They will be forwarded to the output
1939 layer and will be processed as if the cacher did not exist.
1941 layer and will be processed as if the cacher did not exist.
1940
1942
1941 Implementations could also choose to not emit objects - instead locally
1943 Implementations could also choose to not emit objects - instead locally
1942 buffering objects or their encoded representation. They could then emit
1944 buffering objects or their encoded representation. They could then emit
1943 a single "coalesced" object when ``onfinished()`` is called. In
1945 a single "coalesced" object when ``onfinished()`` is called. In
1944 this way, the implementation would function as a filtering layer of
1946 this way, the implementation would function as a filtering layer of
1945 sorts.
1947 sorts.
1946
1948
1947 When caching objects, typically the encoded form of the object will
1949 When caching objects, typically the encoded form of the object will
1948 be stored. Keep in mind that if the original object is forwarded to
1950 be stored. Keep in mind that if the original object is forwarded to
1949 the output layer, it will need to be encoded there as well. For large
1951 the output layer, it will need to be encoded there as well. For large
1950 output, this redundant encoding could add overhead. Implementations
1952 output, this redundant encoding could add overhead. Implementations
1951 could wrap the encoded object data in ``wireprototypes.encodedresponse``
1953 could wrap the encoded object data in ``wireprototypes.encodedresponse``
1952 instances to avoid this overhead.
1954 instances to avoid this overhead.
1953 """
1955 """
1954
1956
1955 def __enter__():
1957 def __enter__():
1956 """Marks the instance as active.
1958 """Marks the instance as active.
1957
1959
1958 Should return self.
1960 Should return self.
1959 """
1961 """
1960
1962
1961 def __exit__(exctype, excvalue, exctb):
1963 def __exit__(exctype, excvalue, exctb):
1962 """Called when cacher is no longer used.
1964 """Called when cacher is no longer used.
1963
1965
1964 This can be used by implementations to perform cleanup actions (e.g.
1966 This can be used by implementations to perform cleanup actions (e.g.
1965 disconnecting network sockets, aborting a partially cached response.
1967 disconnecting network sockets, aborting a partially cached response.
1966 """
1968 """
1967
1969
1968 def adjustcachekeystate(state):
1970 def adjustcachekeystate(state):
1969 """Influences cache key derivation by adjusting state to derive key.
1971 """Influences cache key derivation by adjusting state to derive key.
1970
1972
1971 A dict defining the state used to derive the cache key is passed.
1973 A dict defining the state used to derive the cache key is passed.
1972
1974
1973 Implementations can modify this dict to record additional state that
1975 Implementations can modify this dict to record additional state that
1974 is wanted to influence key derivation.
1976 is wanted to influence key derivation.
1975
1977
1976 Implementations are *highly* encouraged to not modify or delete
1978 Implementations are *highly* encouraged to not modify or delete
1977 existing keys.
1979 existing keys.
1978 """
1980 """
1979
1981
1980 def setcachekey(key):
1982 def setcachekey(key):
1981 """Record the derived cache key for this request.
1983 """Record the derived cache key for this request.
1982
1984
1983 Instances may mutate the key for internal usage, as desired. e.g.
1985 Instances may mutate the key for internal usage, as desired. e.g.
1984 instances may wish to prepend the repo name, introduce path
1986 instances may wish to prepend the repo name, introduce path
1985 components for filesystem or URL addressing, etc. Behavior is up to
1987 components for filesystem or URL addressing, etc. Behavior is up to
1986 the cache.
1988 the cache.
1987
1989
1988 Returns a bool indicating if the request is cacheable by this
1990 Returns a bool indicating if the request is cacheable by this
1989 instance.
1991 instance.
1990 """
1992 """
1991
1993
1992 def lookup():
1994 def lookup():
1993 """Attempt to resolve an entry in the cache.
1995 """Attempt to resolve an entry in the cache.
1994
1996
1995 The instance is instructed to look for the cache key that it was
1997 The instance is instructed to look for the cache key that it was
1996 informed about via the call to ``setcachekey()``.
1998 informed about via the call to ``setcachekey()``.
1997
1999
1998 If there's no cache hit or the cacher doesn't wish to use the cached
2000 If there's no cache hit or the cacher doesn't wish to use the cached
1999 entry, ``None`` should be returned.
2001 entry, ``None`` should be returned.
2000
2002
2001 Else, a dict defining the cached result should be returned. The
2003 Else, a dict defining the cached result should be returned. The
2002 dict may have the following keys:
2004 dict may have the following keys:
2003
2005
2004 objs
2006 objs
2005 An iterable of objects that should be sent to the client. That
2007 An iterable of objects that should be sent to the client. That
2006 iterable of objects is expected to be what the command function
2008 iterable of objects is expected to be what the command function
2007 would return if invoked or an equivalent representation thereof.
2009 would return if invoked or an equivalent representation thereof.
2008 """
2010 """
2009
2011
2010 def onobject(obj):
2012 def onobject(obj):
2011 """Called when a new object is emitted from the command function.
2013 """Called when a new object is emitted from the command function.
2012
2014
2013 Receives as its argument the object that was emitted from the
2015 Receives as its argument the object that was emitted from the
2014 command function.
2016 command function.
2015
2017
2016 This method returns an iterator of objects to forward to the output
2018 This method returns an iterator of objects to forward to the output
2017 layer. The easiest implementation is a generator that just
2019 layer. The easiest implementation is a generator that just
2018 ``yield obj``.
2020 ``yield obj``.
2019 """
2021 """
2020
2022
2021 def onfinished():
2023 def onfinished():
2022 """Called after all objects have been emitted from the command function.
2024 """Called after all objects have been emitted from the command function.
2023
2025
2024 Implementations should return an iterator of objects to forward to
2026 Implementations should return an iterator of objects to forward to
2025 the output layer.
2027 the output layer.
2026
2028
2027 This method can be a generator.
2029 This method can be a generator.
2028 """
2030 """
@@ -1,3763 +1,3772 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import functools
11 import functools
12 import os
12 import os
13 import random
13 import random
14 import sys
14 import sys
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 bin,
20 bin,
21 hex,
21 hex,
22 nullrev,
22 nullrev,
23 sha1nodeconstants,
23 sha1nodeconstants,
24 short,
24 short,
25 )
25 )
26 from .pycompat import (
26 from .pycompat import (
27 delattr,
27 delattr,
28 getattr,
28 getattr,
29 )
29 )
30 from . import (
30 from . import (
31 bookmarks,
31 bookmarks,
32 branchmap,
32 branchmap,
33 bundle2,
33 bundle2,
34 bundlecaches,
34 bundlecaches,
35 changegroup,
35 changegroup,
36 color,
36 color,
37 commit,
37 commit,
38 context,
38 context,
39 dirstate,
39 dirstate,
40 dirstateguard,
40 dirstateguard,
41 discovery,
41 discovery,
42 encoding,
42 encoding,
43 error,
43 error,
44 exchange,
44 exchange,
45 extensions,
45 extensions,
46 filelog,
46 filelog,
47 hook,
47 hook,
48 lock as lockmod,
48 lock as lockmod,
49 match as matchmod,
49 match as matchmod,
50 mergestate as mergestatemod,
50 mergestate as mergestatemod,
51 mergeutil,
51 mergeutil,
52 metadata as metadatamod,
52 metadata as metadatamod,
53 namespaces,
53 namespaces,
54 narrowspec,
54 narrowspec,
55 obsolete,
55 obsolete,
56 pathutil,
56 pathutil,
57 phases,
57 phases,
58 pushkey,
58 pushkey,
59 pycompat,
59 pycompat,
60 rcutil,
60 rcutil,
61 repoview,
61 repoview,
62 requirements as requirementsmod,
62 requirements as requirementsmod,
63 revlog,
63 revlog,
64 revset,
64 revset,
65 revsetlang,
65 revsetlang,
66 scmutil,
66 scmutil,
67 sparse,
67 sparse,
68 store as storemod,
68 store as storemod,
69 subrepoutil,
69 subrepoutil,
70 tags as tagsmod,
70 tags as tagsmod,
71 transaction,
71 transaction,
72 txnutil,
72 txnutil,
73 util,
73 util,
74 vfs as vfsmod,
74 vfs as vfsmod,
75 wireprototypes,
75 wireprototypes,
76 )
76 )
77
77
78 from .interfaces import (
78 from .interfaces import (
79 repository,
79 repository,
80 util as interfaceutil,
80 util as interfaceutil,
81 )
81 )
82
82
83 from .utils import (
83 from .utils import (
84 hashutil,
84 hashutil,
85 procutil,
85 procutil,
86 stringutil,
86 stringutil,
87 urlutil,
87 urlutil,
88 )
88 )
89
89
90 from .revlogutils import (
90 from .revlogutils import (
91 concurrency_checker as revlogchecker,
91 concurrency_checker as revlogchecker,
92 constants as revlogconst,
92 constants as revlogconst,
93 )
93 )
94
94
95 release = lockmod.release
95 release = lockmod.release
96 urlerr = util.urlerr
96 urlerr = util.urlerr
97 urlreq = util.urlreq
97 urlreq = util.urlreq
98
98
99 # set of (path, vfs-location) tuples. vfs-location is:
99 # set of (path, vfs-location) tuples. vfs-location is:
100 # - 'plain for vfs relative paths
100 # - 'plain for vfs relative paths
101 # - '' for svfs relative paths
101 # - '' for svfs relative paths
102 _cachedfiles = set()
102 _cachedfiles = set()
103
103
104
104
105 class _basefilecache(scmutil.filecache):
105 class _basefilecache(scmutil.filecache):
106 """All filecache usage on repo are done for logic that should be unfiltered"""
106 """All filecache usage on repo are done for logic that should be unfiltered"""
107
107
108 def __get__(self, repo, type=None):
108 def __get__(self, repo, type=None):
109 if repo is None:
109 if repo is None:
110 return self
110 return self
111 # proxy to unfiltered __dict__ since filtered repo has no entry
111 # proxy to unfiltered __dict__ since filtered repo has no entry
112 unfi = repo.unfiltered()
112 unfi = repo.unfiltered()
113 try:
113 try:
114 return unfi.__dict__[self.sname]
114 return unfi.__dict__[self.sname]
115 except KeyError:
115 except KeyError:
116 pass
116 pass
117 return super(_basefilecache, self).__get__(unfi, type)
117 return super(_basefilecache, self).__get__(unfi, type)
118
118
119 def set(self, repo, value):
119 def set(self, repo, value):
120 return super(_basefilecache, self).set(repo.unfiltered(), value)
120 return super(_basefilecache, self).set(repo.unfiltered(), value)
121
121
122
122
123 class repofilecache(_basefilecache):
123 class repofilecache(_basefilecache):
124 """filecache for files in .hg but outside of .hg/store"""
124 """filecache for files in .hg but outside of .hg/store"""
125
125
126 def __init__(self, *paths):
126 def __init__(self, *paths):
127 super(repofilecache, self).__init__(*paths)
127 super(repofilecache, self).__init__(*paths)
128 for path in paths:
128 for path in paths:
129 _cachedfiles.add((path, b'plain'))
129 _cachedfiles.add((path, b'plain'))
130
130
131 def join(self, obj, fname):
131 def join(self, obj, fname):
132 return obj.vfs.join(fname)
132 return obj.vfs.join(fname)
133
133
134
134
135 class storecache(_basefilecache):
135 class storecache(_basefilecache):
136 """filecache for files in the store"""
136 """filecache for files in the store"""
137
137
138 def __init__(self, *paths):
138 def __init__(self, *paths):
139 super(storecache, self).__init__(*paths)
139 super(storecache, self).__init__(*paths)
140 for path in paths:
140 for path in paths:
141 _cachedfiles.add((path, b''))
141 _cachedfiles.add((path, b''))
142
142
143 def join(self, obj, fname):
143 def join(self, obj, fname):
144 return obj.sjoin(fname)
144 return obj.sjoin(fname)
145
145
146
146
147 class mixedrepostorecache(_basefilecache):
147 class mixedrepostorecache(_basefilecache):
148 """filecache for a mix files in .hg/store and outside"""
148 """filecache for a mix files in .hg/store and outside"""
149
149
150 def __init__(self, *pathsandlocations):
150 def __init__(self, *pathsandlocations):
151 # scmutil.filecache only uses the path for passing back into our
151 # scmutil.filecache only uses the path for passing back into our
152 # join(), so we can safely pass a list of paths and locations
152 # join(), so we can safely pass a list of paths and locations
153 super(mixedrepostorecache, self).__init__(*pathsandlocations)
153 super(mixedrepostorecache, self).__init__(*pathsandlocations)
154 _cachedfiles.update(pathsandlocations)
154 _cachedfiles.update(pathsandlocations)
155
155
156 def join(self, obj, fnameandlocation):
156 def join(self, obj, fnameandlocation):
157 fname, location = fnameandlocation
157 fname, location = fnameandlocation
158 if location == b'plain':
158 if location == b'plain':
159 return obj.vfs.join(fname)
159 return obj.vfs.join(fname)
160 else:
160 else:
161 if location != b'':
161 if location != b'':
162 raise error.ProgrammingError(
162 raise error.ProgrammingError(
163 b'unexpected location: %s' % location
163 b'unexpected location: %s' % location
164 )
164 )
165 return obj.sjoin(fname)
165 return obj.sjoin(fname)
166
166
167
167
168 def isfilecached(repo, name):
168 def isfilecached(repo, name):
169 """check if a repo has already cached "name" filecache-ed property
169 """check if a repo has already cached "name" filecache-ed property
170
170
171 This returns (cachedobj-or-None, iscached) tuple.
171 This returns (cachedobj-or-None, iscached) tuple.
172 """
172 """
173 cacheentry = repo.unfiltered()._filecache.get(name, None)
173 cacheentry = repo.unfiltered()._filecache.get(name, None)
174 if not cacheentry:
174 if not cacheentry:
175 return None, False
175 return None, False
176 return cacheentry.obj, True
176 return cacheentry.obj, True
177
177
178
178
179 class unfilteredpropertycache(util.propertycache):
179 class unfilteredpropertycache(util.propertycache):
180 """propertycache that apply to unfiltered repo only"""
180 """propertycache that apply to unfiltered repo only"""
181
181
182 def __get__(self, repo, type=None):
182 def __get__(self, repo, type=None):
183 unfi = repo.unfiltered()
183 unfi = repo.unfiltered()
184 if unfi is repo:
184 if unfi is repo:
185 return super(unfilteredpropertycache, self).__get__(unfi)
185 return super(unfilteredpropertycache, self).__get__(unfi)
186 return getattr(unfi, self.name)
186 return getattr(unfi, self.name)
187
187
188
188
189 class filteredpropertycache(util.propertycache):
189 class filteredpropertycache(util.propertycache):
190 """propertycache that must take filtering in account"""
190 """propertycache that must take filtering in account"""
191
191
192 def cachevalue(self, obj, value):
192 def cachevalue(self, obj, value):
193 object.__setattr__(obj, self.name, value)
193 object.__setattr__(obj, self.name, value)
194
194
195
195
196 def hasunfilteredcache(repo, name):
196 def hasunfilteredcache(repo, name):
197 """check if a repo has an unfilteredpropertycache value for <name>"""
197 """check if a repo has an unfilteredpropertycache value for <name>"""
198 return name in vars(repo.unfiltered())
198 return name in vars(repo.unfiltered())
199
199
200
200
201 def unfilteredmethod(orig):
201 def unfilteredmethod(orig):
202 """decorate method that always need to be run on unfiltered version"""
202 """decorate method that always need to be run on unfiltered version"""
203
203
204 @functools.wraps(orig)
204 @functools.wraps(orig)
205 def wrapper(repo, *args, **kwargs):
205 def wrapper(repo, *args, **kwargs):
206 return orig(repo.unfiltered(), *args, **kwargs)
206 return orig(repo.unfiltered(), *args, **kwargs)
207
207
208 return wrapper
208 return wrapper
209
209
210
210
211 moderncaps = {
211 moderncaps = {
212 b'lookup',
212 b'lookup',
213 b'branchmap',
213 b'branchmap',
214 b'pushkey',
214 b'pushkey',
215 b'known',
215 b'known',
216 b'getbundle',
216 b'getbundle',
217 b'unbundle',
217 b'unbundle',
218 }
218 }
219 legacycaps = moderncaps.union({b'changegroupsubset'})
219 legacycaps = moderncaps.union({b'changegroupsubset'})
220
220
221
221
222 @interfaceutil.implementer(repository.ipeercommandexecutor)
222 @interfaceutil.implementer(repository.ipeercommandexecutor)
223 class localcommandexecutor(object):
223 class localcommandexecutor(object):
224 def __init__(self, peer):
224 def __init__(self, peer):
225 self._peer = peer
225 self._peer = peer
226 self._sent = False
226 self._sent = False
227 self._closed = False
227 self._closed = False
228
228
229 def __enter__(self):
229 def __enter__(self):
230 return self
230 return self
231
231
232 def __exit__(self, exctype, excvalue, exctb):
232 def __exit__(self, exctype, excvalue, exctb):
233 self.close()
233 self.close()
234
234
235 def callcommand(self, command, args):
235 def callcommand(self, command, args):
236 if self._sent:
236 if self._sent:
237 raise error.ProgrammingError(
237 raise error.ProgrammingError(
238 b'callcommand() cannot be used after sendcommands()'
238 b'callcommand() cannot be used after sendcommands()'
239 )
239 )
240
240
241 if self._closed:
241 if self._closed:
242 raise error.ProgrammingError(
242 raise error.ProgrammingError(
243 b'callcommand() cannot be used after close()'
243 b'callcommand() cannot be used after close()'
244 )
244 )
245
245
246 # We don't need to support anything fancy. Just call the named
246 # We don't need to support anything fancy. Just call the named
247 # method on the peer and return a resolved future.
247 # method on the peer and return a resolved future.
248 fn = getattr(self._peer, pycompat.sysstr(command))
248 fn = getattr(self._peer, pycompat.sysstr(command))
249
249
250 f = pycompat.futures.Future()
250 f = pycompat.futures.Future()
251
251
252 try:
252 try:
253 result = fn(**pycompat.strkwargs(args))
253 result = fn(**pycompat.strkwargs(args))
254 except Exception:
254 except Exception:
255 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
255 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
256 else:
256 else:
257 f.set_result(result)
257 f.set_result(result)
258
258
259 return f
259 return f
260
260
261 def sendcommands(self):
261 def sendcommands(self):
262 self._sent = True
262 self._sent = True
263
263
264 def close(self):
264 def close(self):
265 self._closed = True
265 self._closed = True
266
266
267
267
268 @interfaceutil.implementer(repository.ipeercommands)
268 @interfaceutil.implementer(repository.ipeercommands)
269 class localpeer(repository.peer):
269 class localpeer(repository.peer):
270 '''peer for a local repo; reflects only the most recent API'''
270 '''peer for a local repo; reflects only the most recent API'''
271
271
272 def __init__(self, repo, caps=None):
272 def __init__(self, repo, caps=None):
273 super(localpeer, self).__init__()
273 super(localpeer, self).__init__()
274
274
275 if caps is None:
275 if caps is None:
276 caps = moderncaps.copy()
276 caps = moderncaps.copy()
277 self._repo = repo.filtered(b'served')
277 self._repo = repo.filtered(b'served')
278 self.ui = repo.ui
278 self.ui = repo.ui
279
279
280 if repo._wanted_sidedata:
280 if repo._wanted_sidedata:
281 formatted = bundle2.format_remote_wanted_sidedata(repo)
281 formatted = bundle2.format_remote_wanted_sidedata(repo)
282 caps.add(b'exp-wanted-sidedata=' + formatted)
282 caps.add(b'exp-wanted-sidedata=' + formatted)
283
283
284 self._caps = repo._restrictcapabilities(caps)
284 self._caps = repo._restrictcapabilities(caps)
285
285
286 # Begin of _basepeer interface.
286 # Begin of _basepeer interface.
287
287
288 def url(self):
288 def url(self):
289 return self._repo.url()
289 return self._repo.url()
290
290
291 def local(self):
291 def local(self):
292 return self._repo
292 return self._repo
293
293
294 def peer(self):
294 def peer(self):
295 return self
295 return self
296
296
297 def canpush(self):
297 def canpush(self):
298 return True
298 return True
299
299
300 def close(self):
300 def close(self):
301 self._repo.close()
301 self._repo.close()
302
302
303 # End of _basepeer interface.
303 # End of _basepeer interface.
304
304
305 # Begin of _basewirecommands interface.
305 # Begin of _basewirecommands interface.
306
306
307 def branchmap(self):
307 def branchmap(self):
308 return self._repo.branchmap()
308 return self._repo.branchmap()
309
309
310 def capabilities(self):
310 def capabilities(self):
311 return self._caps
311 return self._caps
312
312
313 def clonebundles(self):
313 def clonebundles(self):
314 return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)
314 return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)
315
315
316 def debugwireargs(self, one, two, three=None, four=None, five=None):
316 def debugwireargs(self, one, two, three=None, four=None, five=None):
317 """Used to test argument passing over the wire"""
317 """Used to test argument passing over the wire"""
318 return b"%s %s %s %s %s" % (
318 return b"%s %s %s %s %s" % (
319 one,
319 one,
320 two,
320 two,
321 pycompat.bytestr(three),
321 pycompat.bytestr(three),
322 pycompat.bytestr(four),
322 pycompat.bytestr(four),
323 pycompat.bytestr(five),
323 pycompat.bytestr(five),
324 )
324 )
325
325
326 def getbundle(
326 def getbundle(
327 self,
327 self,
328 source,
328 source,
329 heads=None,
329 heads=None,
330 common=None,
330 common=None,
331 bundlecaps=None,
331 bundlecaps=None,
332 remote_sidedata=None,
332 remote_sidedata=None,
333 **kwargs
333 **kwargs
334 ):
334 ):
335 chunks = exchange.getbundlechunks(
335 chunks = exchange.getbundlechunks(
336 self._repo,
336 self._repo,
337 source,
337 source,
338 heads=heads,
338 heads=heads,
339 common=common,
339 common=common,
340 bundlecaps=bundlecaps,
340 bundlecaps=bundlecaps,
341 remote_sidedata=remote_sidedata,
341 remote_sidedata=remote_sidedata,
342 **kwargs
342 **kwargs
343 )[1]
343 )[1]
344 cb = util.chunkbuffer(chunks)
344 cb = util.chunkbuffer(chunks)
345
345
346 if exchange.bundle2requested(bundlecaps):
346 if exchange.bundle2requested(bundlecaps):
347 # When requesting a bundle2, getbundle returns a stream to make the
347 # When requesting a bundle2, getbundle returns a stream to make the
348 # wire level function happier. We need to build a proper object
348 # wire level function happier. We need to build a proper object
349 # from it in local peer.
349 # from it in local peer.
350 return bundle2.getunbundler(self.ui, cb)
350 return bundle2.getunbundler(self.ui, cb)
351 else:
351 else:
352 return changegroup.getunbundler(b'01', cb, None)
352 return changegroup.getunbundler(b'01', cb, None)
353
353
354 def heads(self):
354 def heads(self):
355 return self._repo.heads()
355 return self._repo.heads()
356
356
357 def known(self, nodes):
357 def known(self, nodes):
358 return self._repo.known(nodes)
358 return self._repo.known(nodes)
359
359
360 def listkeys(self, namespace):
360 def listkeys(self, namespace):
361 return self._repo.listkeys(namespace)
361 return self._repo.listkeys(namespace)
362
362
363 def lookup(self, key):
363 def lookup(self, key):
364 return self._repo.lookup(key)
364 return self._repo.lookup(key)
365
365
366 def pushkey(self, namespace, key, old, new):
366 def pushkey(self, namespace, key, old, new):
367 return self._repo.pushkey(namespace, key, old, new)
367 return self._repo.pushkey(namespace, key, old, new)
368
368
369 def stream_out(self):
369 def stream_out(self):
370 raise error.Abort(_(b'cannot perform stream clone against local peer'))
370 raise error.Abort(_(b'cannot perform stream clone against local peer'))
371
371
372 def unbundle(self, bundle, heads, url):
372 def unbundle(self, bundle, heads, url):
373 """apply a bundle on a repo
373 """apply a bundle on a repo
374
374
375 This function handles the repo locking itself."""
375 This function handles the repo locking itself."""
376 try:
376 try:
377 try:
377 try:
378 bundle = exchange.readbundle(self.ui, bundle, None)
378 bundle = exchange.readbundle(self.ui, bundle, None)
379 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
379 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
380 if util.safehasattr(ret, b'getchunks'):
380 if util.safehasattr(ret, b'getchunks'):
381 # This is a bundle20 object, turn it into an unbundler.
381 # This is a bundle20 object, turn it into an unbundler.
382 # This little dance should be dropped eventually when the
382 # This little dance should be dropped eventually when the
383 # API is finally improved.
383 # API is finally improved.
384 stream = util.chunkbuffer(ret.getchunks())
384 stream = util.chunkbuffer(ret.getchunks())
385 ret = bundle2.getunbundler(self.ui, stream)
385 ret = bundle2.getunbundler(self.ui, stream)
386 return ret
386 return ret
387 except Exception as exc:
387 except Exception as exc:
388 # If the exception contains output salvaged from a bundle2
388 # If the exception contains output salvaged from a bundle2
389 # reply, we need to make sure it is printed before continuing
389 # reply, we need to make sure it is printed before continuing
390 # to fail. So we build a bundle2 with such output and consume
390 # to fail. So we build a bundle2 with such output and consume
391 # it directly.
391 # it directly.
392 #
392 #
393 # This is not very elegant but allows a "simple" solution for
393 # This is not very elegant but allows a "simple" solution for
394 # issue4594
394 # issue4594
395 output = getattr(exc, '_bundle2salvagedoutput', ())
395 output = getattr(exc, '_bundle2salvagedoutput', ())
396 if output:
396 if output:
397 bundler = bundle2.bundle20(self._repo.ui)
397 bundler = bundle2.bundle20(self._repo.ui)
398 for out in output:
398 for out in output:
399 bundler.addpart(out)
399 bundler.addpart(out)
400 stream = util.chunkbuffer(bundler.getchunks())
400 stream = util.chunkbuffer(bundler.getchunks())
401 b = bundle2.getunbundler(self.ui, stream)
401 b = bundle2.getunbundler(self.ui, stream)
402 bundle2.processbundle(self._repo, b)
402 bundle2.processbundle(self._repo, b)
403 raise
403 raise
404 except error.PushRaced as exc:
404 except error.PushRaced as exc:
405 raise error.ResponseError(
405 raise error.ResponseError(
406 _(b'push failed:'), stringutil.forcebytestr(exc)
406 _(b'push failed:'), stringutil.forcebytestr(exc)
407 )
407 )
408
408
409 # End of _basewirecommands interface.
409 # End of _basewirecommands interface.
410
410
411 # Begin of peer interface.
411 # Begin of peer interface.
412
412
413 def commandexecutor(self):
413 def commandexecutor(self):
414 return localcommandexecutor(self)
414 return localcommandexecutor(self)
415
415
416 # End of peer interface.
416 # End of peer interface.
417
417
418
418
419 @interfaceutil.implementer(repository.ipeerlegacycommands)
419 @interfaceutil.implementer(repository.ipeerlegacycommands)
420 class locallegacypeer(localpeer):
420 class locallegacypeer(localpeer):
421 """peer extension which implements legacy methods too; used for tests with
421 """peer extension which implements legacy methods too; used for tests with
422 restricted capabilities"""
422 restricted capabilities"""
423
423
424 def __init__(self, repo):
424 def __init__(self, repo):
425 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
425 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
426
426
427 # Begin of baselegacywirecommands interface.
427 # Begin of baselegacywirecommands interface.
428
428
429 def between(self, pairs):
429 def between(self, pairs):
430 return self._repo.between(pairs)
430 return self._repo.between(pairs)
431
431
432 def branches(self, nodes):
432 def branches(self, nodes):
433 return self._repo.branches(nodes)
433 return self._repo.branches(nodes)
434
434
435 def changegroup(self, nodes, source):
435 def changegroup(self, nodes, source):
436 outgoing = discovery.outgoing(
436 outgoing = discovery.outgoing(
437 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
437 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
438 )
438 )
439 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
439 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
440
440
441 def changegroupsubset(self, bases, heads, source):
441 def changegroupsubset(self, bases, heads, source):
442 outgoing = discovery.outgoing(
442 outgoing = discovery.outgoing(
443 self._repo, missingroots=bases, ancestorsof=heads
443 self._repo, missingroots=bases, ancestorsof=heads
444 )
444 )
445 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
445 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
446
446
447 # End of baselegacywirecommands interface.
447 # End of baselegacywirecommands interface.
448
448
449
449
450 # Functions receiving (ui, features) that extensions can register to impact
450 # Functions receiving (ui, features) that extensions can register to impact
451 # the ability to load repositories with custom requirements. Only
451 # the ability to load repositories with custom requirements. Only
452 # functions defined in loaded extensions are called.
452 # functions defined in loaded extensions are called.
453 #
453 #
454 # The function receives a set of requirement strings that the repository
454 # The function receives a set of requirement strings that the repository
455 # is capable of opening. Functions will typically add elements to the
455 # is capable of opening. Functions will typically add elements to the
456 # set to reflect that the extension knows how to handle that requirements.
456 # set to reflect that the extension knows how to handle that requirements.
457 featuresetupfuncs = set()
457 featuresetupfuncs = set()
458
458
459
459
460 def _getsharedvfs(hgvfs, requirements):
460 def _getsharedvfs(hgvfs, requirements):
461 """returns the vfs object pointing to root of shared source
461 """returns the vfs object pointing to root of shared source
462 repo for a shared repository
462 repo for a shared repository
463
463
464 hgvfs is vfs pointing at .hg/ of current repo (shared one)
464 hgvfs is vfs pointing at .hg/ of current repo (shared one)
465 requirements is a set of requirements of current repo (shared one)
465 requirements is a set of requirements of current repo (shared one)
466 """
466 """
467 # The ``shared`` or ``relshared`` requirements indicate the
467 # The ``shared`` or ``relshared`` requirements indicate the
468 # store lives in the path contained in the ``.hg/sharedpath`` file.
468 # store lives in the path contained in the ``.hg/sharedpath`` file.
469 # This is an absolute path for ``shared`` and relative to
469 # This is an absolute path for ``shared`` and relative to
470 # ``.hg/`` for ``relshared``.
470 # ``.hg/`` for ``relshared``.
471 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
471 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
472 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
472 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
473 sharedpath = util.normpath(hgvfs.join(sharedpath))
473 sharedpath = util.normpath(hgvfs.join(sharedpath))
474
474
475 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
475 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
476
476
477 if not sharedvfs.exists():
477 if not sharedvfs.exists():
478 raise error.RepoError(
478 raise error.RepoError(
479 _(b'.hg/sharedpath points to nonexistent directory %s')
479 _(b'.hg/sharedpath points to nonexistent directory %s')
480 % sharedvfs.base
480 % sharedvfs.base
481 )
481 )
482 return sharedvfs
482 return sharedvfs
483
483
484
484
485 def _readrequires(vfs, allowmissing):
485 def _readrequires(vfs, allowmissing):
486 """reads the require file present at root of this vfs
486 """reads the require file present at root of this vfs
487 and return a set of requirements
487 and return a set of requirements
488
488
489 If allowmissing is True, we suppress ENOENT if raised"""
489 If allowmissing is True, we suppress ENOENT if raised"""
490 # requires file contains a newline-delimited list of
490 # requires file contains a newline-delimited list of
491 # features/capabilities the opener (us) must have in order to use
491 # features/capabilities the opener (us) must have in order to use
492 # the repository. This file was introduced in Mercurial 0.9.2,
492 # the repository. This file was introduced in Mercurial 0.9.2,
493 # which means very old repositories may not have one. We assume
493 # which means very old repositories may not have one. We assume
494 # a missing file translates to no requirements.
494 # a missing file translates to no requirements.
495 try:
495 try:
496 requirements = set(vfs.read(b'requires').splitlines())
496 requirements = set(vfs.read(b'requires').splitlines())
497 except IOError as e:
497 except IOError as e:
498 if not (allowmissing and e.errno == errno.ENOENT):
498 if not (allowmissing and e.errno == errno.ENOENT):
499 raise
499 raise
500 requirements = set()
500 requirements = set()
501 return requirements
501 return requirements
502
502
503
503
504 def makelocalrepository(baseui, path, intents=None):
504 def makelocalrepository(baseui, path, intents=None):
505 """Create a local repository object.
505 """Create a local repository object.
506
506
507 Given arguments needed to construct a local repository, this function
507 Given arguments needed to construct a local repository, this function
508 performs various early repository loading functionality (such as
508 performs various early repository loading functionality (such as
509 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
509 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
510 the repository can be opened, derives a type suitable for representing
510 the repository can be opened, derives a type suitable for representing
511 that repository, and returns an instance of it.
511 that repository, and returns an instance of it.
512
512
513 The returned object conforms to the ``repository.completelocalrepository``
513 The returned object conforms to the ``repository.completelocalrepository``
514 interface.
514 interface.
515
515
516 The repository type is derived by calling a series of factory functions
516 The repository type is derived by calling a series of factory functions
517 for each aspect/interface of the final repository. These are defined by
517 for each aspect/interface of the final repository. These are defined by
518 ``REPO_INTERFACES``.
518 ``REPO_INTERFACES``.
519
519
520 Each factory function is called to produce a type implementing a specific
520 Each factory function is called to produce a type implementing a specific
521 interface. The cumulative list of returned types will be combined into a
521 interface. The cumulative list of returned types will be combined into a
522 new type and that type will be instantiated to represent the local
522 new type and that type will be instantiated to represent the local
523 repository.
523 repository.
524
524
525 The factory functions each receive various state that may be consulted
525 The factory functions each receive various state that may be consulted
526 as part of deriving a type.
526 as part of deriving a type.
527
527
528 Extensions should wrap these factory functions to customize repository type
528 Extensions should wrap these factory functions to customize repository type
529 creation. Note that an extension's wrapped function may be called even if
529 creation. Note that an extension's wrapped function may be called even if
530 that extension is not loaded for the repo being constructed. Extensions
530 that extension is not loaded for the repo being constructed. Extensions
531 should check if their ``__name__`` appears in the
531 should check if their ``__name__`` appears in the
532 ``extensionmodulenames`` set passed to the factory function and no-op if
532 ``extensionmodulenames`` set passed to the factory function and no-op if
533 not.
533 not.
534 """
534 """
535 ui = baseui.copy()
535 ui = baseui.copy()
536 # Prevent copying repo configuration.
536 # Prevent copying repo configuration.
537 ui.copy = baseui.copy
537 ui.copy = baseui.copy
538
538
539 # Working directory VFS rooted at repository root.
539 # Working directory VFS rooted at repository root.
540 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
540 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
541
541
542 # Main VFS for .hg/ directory.
542 # Main VFS for .hg/ directory.
543 hgpath = wdirvfs.join(b'.hg')
543 hgpath = wdirvfs.join(b'.hg')
544 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
544 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
545 # Whether this repository is shared one or not
545 # Whether this repository is shared one or not
546 shared = False
546 shared = False
547 # If this repository is shared, vfs pointing to shared repo
547 # If this repository is shared, vfs pointing to shared repo
548 sharedvfs = None
548 sharedvfs = None
549
549
550 # The .hg/ path should exist and should be a directory. All other
550 # The .hg/ path should exist and should be a directory. All other
551 # cases are errors.
551 # cases are errors.
552 if not hgvfs.isdir():
552 if not hgvfs.isdir():
553 try:
553 try:
554 hgvfs.stat()
554 hgvfs.stat()
555 except OSError as e:
555 except OSError as e:
556 if e.errno != errno.ENOENT:
556 if e.errno != errno.ENOENT:
557 raise
557 raise
558 except ValueError as e:
558 except ValueError as e:
559 # Can be raised on Python 3.8 when path is invalid.
559 # Can be raised on Python 3.8 when path is invalid.
560 raise error.Abort(
560 raise error.Abort(
561 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
561 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
562 )
562 )
563
563
564 raise error.RepoError(_(b'repository %s not found') % path)
564 raise error.RepoError(_(b'repository %s not found') % path)
565
565
566 requirements = _readrequires(hgvfs, True)
566 requirements = _readrequires(hgvfs, True)
567 shared = (
567 shared = (
568 requirementsmod.SHARED_REQUIREMENT in requirements
568 requirementsmod.SHARED_REQUIREMENT in requirements
569 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
569 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
570 )
570 )
571 storevfs = None
571 storevfs = None
572 if shared:
572 if shared:
573 # This is a shared repo
573 # This is a shared repo
574 sharedvfs = _getsharedvfs(hgvfs, requirements)
574 sharedvfs = _getsharedvfs(hgvfs, requirements)
575 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
575 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
576 else:
576 else:
577 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
577 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
578
578
579 # if .hg/requires contains the sharesafe requirement, it means
579 # if .hg/requires contains the sharesafe requirement, it means
580 # there exists a `.hg/store/requires` too and we should read it
580 # there exists a `.hg/store/requires` too and we should read it
581 # NOTE: presence of SHARESAFE_REQUIREMENT imply that store requirement
581 # NOTE: presence of SHARESAFE_REQUIREMENT imply that store requirement
582 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
582 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
583 # is not present, refer checkrequirementscompat() for that
583 # is not present, refer checkrequirementscompat() for that
584 #
584 #
585 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
585 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
586 # repository was shared the old way. We check the share source .hg/requires
586 # repository was shared the old way. We check the share source .hg/requires
587 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
587 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
588 # to be reshared
588 # to be reshared
589 hint = _(b"see `hg help config.format.use-share-safe` for more information")
589 hint = _(b"see `hg help config.format.use-share-safe` for more information")
590 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
590 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
591
591
592 if (
592 if (
593 shared
593 shared
594 and requirementsmod.SHARESAFE_REQUIREMENT
594 and requirementsmod.SHARESAFE_REQUIREMENT
595 not in _readrequires(sharedvfs, True)
595 not in _readrequires(sharedvfs, True)
596 ):
596 ):
597 mismatch_warn = ui.configbool(
597 mismatch_warn = ui.configbool(
598 b'share', b'safe-mismatch.source-not-safe.warn'
598 b'share', b'safe-mismatch.source-not-safe.warn'
599 )
599 )
600 mismatch_config = ui.config(
600 mismatch_config = ui.config(
601 b'share', b'safe-mismatch.source-not-safe'
601 b'share', b'safe-mismatch.source-not-safe'
602 )
602 )
603 if mismatch_config in (
603 if mismatch_config in (
604 b'downgrade-allow',
604 b'downgrade-allow',
605 b'allow',
605 b'allow',
606 b'downgrade-abort',
606 b'downgrade-abort',
607 ):
607 ):
608 # prevent cyclic import localrepo -> upgrade -> localrepo
608 # prevent cyclic import localrepo -> upgrade -> localrepo
609 from . import upgrade
609 from . import upgrade
610
610
611 upgrade.downgrade_share_to_non_safe(
611 upgrade.downgrade_share_to_non_safe(
612 ui,
612 ui,
613 hgvfs,
613 hgvfs,
614 sharedvfs,
614 sharedvfs,
615 requirements,
615 requirements,
616 mismatch_config,
616 mismatch_config,
617 mismatch_warn,
617 mismatch_warn,
618 )
618 )
619 elif mismatch_config == b'abort':
619 elif mismatch_config == b'abort':
620 raise error.Abort(
620 raise error.Abort(
621 _(b"share source does not support share-safe requirement"),
621 _(b"share source does not support share-safe requirement"),
622 hint=hint,
622 hint=hint,
623 )
623 )
624 else:
624 else:
625 raise error.Abort(
625 raise error.Abort(
626 _(
626 _(
627 b"share-safe mismatch with source.\nUnrecognized"
627 b"share-safe mismatch with source.\nUnrecognized"
628 b" value '%s' of `share.safe-mismatch.source-not-safe`"
628 b" value '%s' of `share.safe-mismatch.source-not-safe`"
629 b" set."
629 b" set."
630 )
630 )
631 % mismatch_config,
631 % mismatch_config,
632 hint=hint,
632 hint=hint,
633 )
633 )
634 else:
634 else:
635 requirements |= _readrequires(storevfs, False)
635 requirements |= _readrequires(storevfs, False)
636 elif shared:
636 elif shared:
637 sourcerequires = _readrequires(sharedvfs, False)
637 sourcerequires = _readrequires(sharedvfs, False)
638 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
638 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
639 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
639 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
640 mismatch_warn = ui.configbool(
640 mismatch_warn = ui.configbool(
641 b'share', b'safe-mismatch.source-safe.warn'
641 b'share', b'safe-mismatch.source-safe.warn'
642 )
642 )
643 if mismatch_config in (
643 if mismatch_config in (
644 b'upgrade-allow',
644 b'upgrade-allow',
645 b'allow',
645 b'allow',
646 b'upgrade-abort',
646 b'upgrade-abort',
647 ):
647 ):
648 # prevent cyclic import localrepo -> upgrade -> localrepo
648 # prevent cyclic import localrepo -> upgrade -> localrepo
649 from . import upgrade
649 from . import upgrade
650
650
651 upgrade.upgrade_share_to_safe(
651 upgrade.upgrade_share_to_safe(
652 ui,
652 ui,
653 hgvfs,
653 hgvfs,
654 storevfs,
654 storevfs,
655 requirements,
655 requirements,
656 mismatch_config,
656 mismatch_config,
657 mismatch_warn,
657 mismatch_warn,
658 )
658 )
659 elif mismatch_config == b'abort':
659 elif mismatch_config == b'abort':
660 raise error.Abort(
660 raise error.Abort(
661 _(
661 _(
662 b'version mismatch: source uses share-safe'
662 b'version mismatch: source uses share-safe'
663 b' functionality while the current share does not'
663 b' functionality while the current share does not'
664 ),
664 ),
665 hint=hint,
665 hint=hint,
666 )
666 )
667 else:
667 else:
668 raise error.Abort(
668 raise error.Abort(
669 _(
669 _(
670 b"share-safe mismatch with source.\nUnrecognized"
670 b"share-safe mismatch with source.\nUnrecognized"
671 b" value '%s' of `share.safe-mismatch.source-safe` set."
671 b" value '%s' of `share.safe-mismatch.source-safe` set."
672 )
672 )
673 % mismatch_config,
673 % mismatch_config,
674 hint=hint,
674 hint=hint,
675 )
675 )
676
676
677 # The .hg/hgrc file may load extensions or contain config options
677 # The .hg/hgrc file may load extensions or contain config options
678 # that influence repository construction. Attempt to load it and
678 # that influence repository construction. Attempt to load it and
679 # process any new extensions that it may have pulled in.
679 # process any new extensions that it may have pulled in.
680 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
680 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
681 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
681 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
682 extensions.loadall(ui)
682 extensions.loadall(ui)
683 extensions.populateui(ui)
683 extensions.populateui(ui)
684
684
685 # Set of module names of extensions loaded for this repository.
685 # Set of module names of extensions loaded for this repository.
686 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
686 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
687
687
688 supportedrequirements = gathersupportedrequirements(ui)
688 supportedrequirements = gathersupportedrequirements(ui)
689
689
690 # We first validate the requirements are known.
690 # We first validate the requirements are known.
691 ensurerequirementsrecognized(requirements, supportedrequirements)
691 ensurerequirementsrecognized(requirements, supportedrequirements)
692
692
693 # Then we validate that the known set is reasonable to use together.
693 # Then we validate that the known set is reasonable to use together.
694 ensurerequirementscompatible(ui, requirements)
694 ensurerequirementscompatible(ui, requirements)
695
695
696 # TODO there are unhandled edge cases related to opening repositories with
696 # TODO there are unhandled edge cases related to opening repositories with
697 # shared storage. If storage is shared, we should also test for requirements
697 # shared storage. If storage is shared, we should also test for requirements
698 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
698 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
699 # that repo, as that repo may load extensions needed to open it. This is a
699 # that repo, as that repo may load extensions needed to open it. This is a
700 # bit complicated because we don't want the other hgrc to overwrite settings
700 # bit complicated because we don't want the other hgrc to overwrite settings
701 # in this hgrc.
701 # in this hgrc.
702 #
702 #
703 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
703 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
704 # file when sharing repos. But if a requirement is added after the share is
704 # file when sharing repos. But if a requirement is added after the share is
705 # performed, thereby introducing a new requirement for the opener, we may
705 # performed, thereby introducing a new requirement for the opener, we may
706 # will not see that and could encounter a run-time error interacting with
706 # will not see that and could encounter a run-time error interacting with
707 # that shared store since it has an unknown-to-us requirement.
707 # that shared store since it has an unknown-to-us requirement.
708
708
709 # At this point, we know we should be capable of opening the repository.
709 # At this point, we know we should be capable of opening the repository.
710 # Now get on with doing that.
710 # Now get on with doing that.
711
711
712 features = set()
712 features = set()
713
713
714 # The "store" part of the repository holds versioned data. How it is
714 # The "store" part of the repository holds versioned data. How it is
715 # accessed is determined by various requirements. If `shared` or
715 # accessed is determined by various requirements. If `shared` or
716 # `relshared` requirements are present, this indicates current repository
716 # `relshared` requirements are present, this indicates current repository
717 # is a share and store exists in path mentioned in `.hg/sharedpath`
717 # is a share and store exists in path mentioned in `.hg/sharedpath`
718 if shared:
718 if shared:
719 storebasepath = sharedvfs.base
719 storebasepath = sharedvfs.base
720 cachepath = sharedvfs.join(b'cache')
720 cachepath = sharedvfs.join(b'cache')
721 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
721 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
722 else:
722 else:
723 storebasepath = hgvfs.base
723 storebasepath = hgvfs.base
724 cachepath = hgvfs.join(b'cache')
724 cachepath = hgvfs.join(b'cache')
725 wcachepath = hgvfs.join(b'wcache')
725 wcachepath = hgvfs.join(b'wcache')
726
726
727 # The store has changed over time and the exact layout is dictated by
727 # The store has changed over time and the exact layout is dictated by
728 # requirements. The store interface abstracts differences across all
728 # requirements. The store interface abstracts differences across all
729 # of them.
729 # of them.
730 store = makestore(
730 store = makestore(
731 requirements,
731 requirements,
732 storebasepath,
732 storebasepath,
733 lambda base: vfsmod.vfs(base, cacheaudited=True),
733 lambda base: vfsmod.vfs(base, cacheaudited=True),
734 )
734 )
735 hgvfs.createmode = store.createmode
735 hgvfs.createmode = store.createmode
736
736
737 storevfs = store.vfs
737 storevfs = store.vfs
738 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
738 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
739
739
740 # The cache vfs is used to manage cache files.
740 # The cache vfs is used to manage cache files.
741 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
741 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
742 cachevfs.createmode = store.createmode
742 cachevfs.createmode = store.createmode
743 # The cache vfs is used to manage cache files related to the working copy
743 # The cache vfs is used to manage cache files related to the working copy
744 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
744 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
745 wcachevfs.createmode = store.createmode
745 wcachevfs.createmode = store.createmode
746
746
747 # Now resolve the type for the repository object. We do this by repeatedly
747 # Now resolve the type for the repository object. We do this by repeatedly
748 # calling a factory function to produces types for specific aspects of the
748 # calling a factory function to produces types for specific aspects of the
749 # repo's operation. The aggregate returned types are used as base classes
749 # repo's operation. The aggregate returned types are used as base classes
750 # for a dynamically-derived type, which will represent our new repository.
750 # for a dynamically-derived type, which will represent our new repository.
751
751
752 bases = []
752 bases = []
753 extrastate = {}
753 extrastate = {}
754
754
755 for iface, fn in REPO_INTERFACES:
755 for iface, fn in REPO_INTERFACES:
756 # We pass all potentially useful state to give extensions tons of
756 # We pass all potentially useful state to give extensions tons of
757 # flexibility.
757 # flexibility.
758 typ = fn()(
758 typ = fn()(
759 ui=ui,
759 ui=ui,
760 intents=intents,
760 intents=intents,
761 requirements=requirements,
761 requirements=requirements,
762 features=features,
762 features=features,
763 wdirvfs=wdirvfs,
763 wdirvfs=wdirvfs,
764 hgvfs=hgvfs,
764 hgvfs=hgvfs,
765 store=store,
765 store=store,
766 storevfs=storevfs,
766 storevfs=storevfs,
767 storeoptions=storevfs.options,
767 storeoptions=storevfs.options,
768 cachevfs=cachevfs,
768 cachevfs=cachevfs,
769 wcachevfs=wcachevfs,
769 wcachevfs=wcachevfs,
770 extensionmodulenames=extensionmodulenames,
770 extensionmodulenames=extensionmodulenames,
771 extrastate=extrastate,
771 extrastate=extrastate,
772 baseclasses=bases,
772 baseclasses=bases,
773 )
773 )
774
774
775 if not isinstance(typ, type):
775 if not isinstance(typ, type):
776 raise error.ProgrammingError(
776 raise error.ProgrammingError(
777 b'unable to construct type for %s' % iface
777 b'unable to construct type for %s' % iface
778 )
778 )
779
779
780 bases.append(typ)
780 bases.append(typ)
781
781
782 # type() allows you to use characters in type names that wouldn't be
782 # type() allows you to use characters in type names that wouldn't be
783 # recognized as Python symbols in source code. We abuse that to add
783 # recognized as Python symbols in source code. We abuse that to add
784 # rich information about our constructed repo.
784 # rich information about our constructed repo.
785 name = pycompat.sysstr(
785 name = pycompat.sysstr(
786 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
786 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
787 )
787 )
788
788
789 cls = type(name, tuple(bases), {})
789 cls = type(name, tuple(bases), {})
790
790
791 return cls(
791 return cls(
792 baseui=baseui,
792 baseui=baseui,
793 ui=ui,
793 ui=ui,
794 origroot=path,
794 origroot=path,
795 wdirvfs=wdirvfs,
795 wdirvfs=wdirvfs,
796 hgvfs=hgvfs,
796 hgvfs=hgvfs,
797 requirements=requirements,
797 requirements=requirements,
798 supportedrequirements=supportedrequirements,
798 supportedrequirements=supportedrequirements,
799 sharedpath=storebasepath,
799 sharedpath=storebasepath,
800 store=store,
800 store=store,
801 cachevfs=cachevfs,
801 cachevfs=cachevfs,
802 wcachevfs=wcachevfs,
802 wcachevfs=wcachevfs,
803 features=features,
803 features=features,
804 intents=intents,
804 intents=intents,
805 )
805 )
806
806
807
807
def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.

    sharedvfs is vfs object pointing to source repo if the current one is a
    shared one
    """
    if not rcutil.use_repo_hgrc():
        return False

    # Candidate config files in the order they must be read. The shared
    # source's hgrc comes first so the local configuration can override it.
    candidates = []
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
        candidates.append((sharedvfs.join(b'hgrc'), sharedvfs.base))
    candidates.append((hgvfs.join(b'hgrc'), wdirvfs.base))
    candidates.append((hgvfs.join(b'hgrc-not-shared'), wdirvfs.base))

    loaded = False
    for rcfile, root in candidates:
        try:
            # A missing or unreadable file is silently skipped, matching
            # historical behavior.
            ui.readconfig(rcfile, root=root)
            loaded = True
        except IOError:
            continue

    return loaded
848
848
849
849
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Extensions that should be enabled automatically when the matching
    # requirement is present in the repository.
    autoextensions = {
        b'git': [b'git'],
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, extnames in sorted(autoextensions.items()):
        if requirement in requirements:
            for extname in extnames:
                # Do not override an explicit user setting for the
                # extension.
                if not ui.hasconfig(b'extensions', extname):
                    ui.setconfig(
                        b'extensions', extname, b'', source=b'autoload'
                    )
875
875
876
876
def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Requirements always understood by this version of the code.
    supported = set(localrepository._basesupported)

    # Let extensions loaded for this ui instance contribute additional
    # requirements through their registered ``featuresetupfuncs`` entries.
    loadedmodules = {mod.__name__ for _n, mod in extensions.extensions(ui)}
    for setupfn in featuresetupfuncs:
        if setupfn.__module__ in loadedmodules:
            setupfn(ui, supported)

    # Requirements derived from the registered compression engines.
    for enginename in util.compengines:
        engine = util.compengines[enginename]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % enginename)
            if engine.name() == b'zstd':
                supported.add(b'revlog-compression-zstd')

    return supported
899
899
900
900
def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RequirementError``
    if there exists any requirement in that set that currently loaded
    code doesn't recognize, or if an unrecognized entry looks like
    corruption of the requires file.
    """
    unknown = {req for req in requirements if req not in supported}

    # An entry that is empty or does not start with an alphanumeric
    # character cannot come from a well-formed requires file.
    for req in unknown:
        if not req or not req[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

    if unknown:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(unknown)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )
930
930
931
931
def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    wantssparse = requirementsmod.SPARSE_REQUIREMENT in requirements
    if wantssparse and not sparse.enabled:
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extensions to access'
            )
        )
956
956
957
957
def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    # Without the "store" requirement, the repo uses the oldest, plain
    # layout.
    if requirementsmod.STORE_REQUIREMENT not in requirements:
        return storemod.basicstore(path, vfstype)

    # With the fncache, tracked filenames are recorded in a cache file and
    # may additionally be dot-encoded.
    if requirementsmod.FNCACHE_REQUIREMENT in requirements:
        dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
        return storemod.fncachestore(path, vfstype, dotencode)

    return storemod.encodedstore(path, vfstype)
968
968
969
969
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    hasrevlogv1 = requirementsmod.REVLOGV1_REQUIREMENT in requirements
    hasrevlogv2 = requirementsmod.REVLOGV2_REQUIREMENT in requirements
    if hasrevlogv1 or hasrevlogv2:
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:
        # explicitly mark repo as using revlogv0
        options[b'revlogv0'] = True

    if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
        options[b'copies-storage'] = b'changeset-sidedata'
    else:
        writecopiesto = ui.config(b'experimental', b'copies.write-to')
        if writecopiesto in (b'changeset-only', b'compatibility'):
            options[b'copies-storage'] = b'extra'

    return options
1007
1007
1008
1008
1009 def resolverevlogstorevfsoptions(ui, requirements, features):
1009 def resolverevlogstorevfsoptions(ui, requirements, features):
1010 """Resolve opener options specific to revlogs."""
1010 """Resolve opener options specific to revlogs."""
1011
1011
1012 options = {}
1012 options = {}
1013 options[b'flagprocessors'] = {}
1013 options[b'flagprocessors'] = {}
1014
1014
1015 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1015 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1016 options[b'revlogv1'] = True
1016 options[b'revlogv1'] = True
1017 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1017 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1018 options[b'revlogv2'] = True
1018 options[b'revlogv2'] = True
1019
1019
1020 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1020 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1021 options[b'generaldelta'] = True
1021 options[b'generaldelta'] = True
1022
1022
1023 # experimental config: format.chunkcachesize
1023 # experimental config: format.chunkcachesize
1024 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1024 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1025 if chunkcachesize is not None:
1025 if chunkcachesize is not None:
1026 options[b'chunkcachesize'] = chunkcachesize
1026 options[b'chunkcachesize'] = chunkcachesize
1027
1027
1028 deltabothparents = ui.configbool(
1028 deltabothparents = ui.configbool(
1029 b'storage', b'revlog.optimize-delta-parent-choice'
1029 b'storage', b'revlog.optimize-delta-parent-choice'
1030 )
1030 )
1031 options[b'deltabothparents'] = deltabothparents
1031 options[b'deltabothparents'] = deltabothparents
1032
1032
1033 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1033 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1034 lazydeltabase = False
1034 lazydeltabase = False
1035 if lazydelta:
1035 if lazydelta:
1036 lazydeltabase = ui.configbool(
1036 lazydeltabase = ui.configbool(
1037 b'storage', b'revlog.reuse-external-delta-parent'
1037 b'storage', b'revlog.reuse-external-delta-parent'
1038 )
1038 )
1039 if lazydeltabase is None:
1039 if lazydeltabase is None:
1040 lazydeltabase = not scmutil.gddeltaconfig(ui)
1040 lazydeltabase = not scmutil.gddeltaconfig(ui)
1041 options[b'lazydelta'] = lazydelta
1041 options[b'lazydelta'] = lazydelta
1042 options[b'lazydeltabase'] = lazydeltabase
1042 options[b'lazydeltabase'] = lazydeltabase
1043
1043
1044 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1044 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1045 if 0 <= chainspan:
1045 if 0 <= chainspan:
1046 options[b'maxdeltachainspan'] = chainspan
1046 options[b'maxdeltachainspan'] = chainspan
1047
1047
1048 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
1048 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
1049 if mmapindexthreshold is not None:
1049 if mmapindexthreshold is not None:
1050 options[b'mmapindexthreshold'] = mmapindexthreshold
1050 options[b'mmapindexthreshold'] = mmapindexthreshold
1051
1051
1052 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1052 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1053 srdensitythres = float(
1053 srdensitythres = float(
1054 ui.config(b'experimental', b'sparse-read.density-threshold')
1054 ui.config(b'experimental', b'sparse-read.density-threshold')
1055 )
1055 )
1056 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1056 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1057 options[b'with-sparse-read'] = withsparseread
1057 options[b'with-sparse-read'] = withsparseread
1058 options[b'sparse-read-density-threshold'] = srdensitythres
1058 options[b'sparse-read-density-threshold'] = srdensitythres
1059 options[b'sparse-read-min-gap-size'] = srmingapsize
1059 options[b'sparse-read-min-gap-size'] = srmingapsize
1060
1060
1061 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1061 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1062 options[b'sparse-revlog'] = sparserevlog
1062 options[b'sparse-revlog'] = sparserevlog
1063 if sparserevlog:
1063 if sparserevlog:
1064 options[b'generaldelta'] = True
1064 options[b'generaldelta'] = True
1065
1065
1066 sidedata = requirementsmod.SIDEDATA_REQUIREMENT in requirements
1066 sidedata = requirementsmod.SIDEDATA_REQUIREMENT in requirements
1067 options[b'side-data'] = sidedata
1067 options[b'side-data'] = sidedata
1068
1068
1069 maxchainlen = None
1069 maxchainlen = None
1070 if sparserevlog:
1070 if sparserevlog:
1071 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1071 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1072 # experimental config: format.maxchainlen
1072 # experimental config: format.maxchainlen
1073 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1073 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1074 if maxchainlen is not None:
1074 if maxchainlen is not None:
1075 options[b'maxchainlen'] = maxchainlen
1075 options[b'maxchainlen'] = maxchainlen
1076
1076
1077 for r in requirements:
1077 for r in requirements:
1078 # we allow multiple compression engine requirement to co-exist because
1078 # we allow multiple compression engine requirement to co-exist because
1079 # strickly speaking, revlog seems to support mixed compression style.
1079 # strickly speaking, revlog seems to support mixed compression style.
1080 #
1080 #
1081 # The compression used for new entries will be "the last one"
1081 # The compression used for new entries will be "the last one"
1082 prefix = r.startswith
1082 prefix = r.startswith
1083 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1083 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1084 options[b'compengine'] = r.split(b'-', 2)[2]
1084 options[b'compengine'] = r.split(b'-', 2)[2]
1085
1085
1086 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1086 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1087 if options[b'zlib.level'] is not None:
1087 if options[b'zlib.level'] is not None:
1088 if not (0 <= options[b'zlib.level'] <= 9):
1088 if not (0 <= options[b'zlib.level'] <= 9):
1089 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1089 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1090 raise error.Abort(msg % options[b'zlib.level'])
1090 raise error.Abort(msg % options[b'zlib.level'])
1091 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1091 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1092 if options[b'zstd.level'] is not None:
1092 if options[b'zstd.level'] is not None:
1093 if not (0 <= options[b'zstd.level'] <= 22):
1093 if not (0 <= options[b'zstd.level'] <= 22):
1094 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1094 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1095 raise error.Abort(msg % options[b'zstd.level'])
1095 raise error.Abort(msg % options[b'zstd.level'])
1096
1096
1097 if requirementsmod.NARROW_REQUIREMENT in requirements:
1097 if requirementsmod.NARROW_REQUIREMENT in requirements:
1098 options[b'enableellipsis'] = True
1098 options[b'enableellipsis'] = True
1099
1099
1100 if ui.configbool(b'experimental', b'rust.index'):
1100 if ui.configbool(b'experimental', b'rust.index'):
1101 options[b'rust.index'] = True
1101 options[b'rust.index'] = True
1102 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1102 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1103 slow_path = ui.config(
1103 slow_path = ui.config(
1104 b'storage', b'revlog.persistent-nodemap.slow-path'
1104 b'storage', b'revlog.persistent-nodemap.slow-path'
1105 )
1105 )
1106 if slow_path not in (b'allow', b'warn', b'abort'):
1106 if slow_path not in (b'allow', b'warn', b'abort'):
1107 default = ui.config_default(
1107 default = ui.config_default(
1108 b'storage', b'revlog.persistent-nodemap.slow-path'
1108 b'storage', b'revlog.persistent-nodemap.slow-path'
1109 )
1109 )
1110 msg = _(
1110 msg = _(
1111 b'unknown value for config '
1111 b'unknown value for config '
1112 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1112 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1113 )
1113 )
1114 ui.warn(msg % slow_path)
1114 ui.warn(msg % slow_path)
1115 if not ui.quiet:
1115 if not ui.quiet:
1116 ui.warn(_(b'falling back to default value: %s\n') % default)
1116 ui.warn(_(b'falling back to default value: %s\n') % default)
1117 slow_path = default
1117 slow_path = default
1118
1118
1119 msg = _(
1119 msg = _(
1120 b"accessing `persistent-nodemap` repository without associated "
1120 b"accessing `persistent-nodemap` repository without associated "
1121 b"fast implementation."
1121 b"fast implementation."
1122 )
1122 )
1123 hint = _(
1123 hint = _(
1124 b"check `hg help config.format.use-persistent-nodemap` "
1124 b"check `hg help config.format.use-persistent-nodemap` "
1125 b"for details"
1125 b"for details"
1126 )
1126 )
1127 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1127 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1128 if slow_path == b'warn':
1128 if slow_path == b'warn':
1129 msg = b"warning: " + msg + b'\n'
1129 msg = b"warning: " + msg + b'\n'
1130 ui.warn(msg)
1130 ui.warn(msg)
1131 if not ui.quiet:
1131 if not ui.quiet:
1132 hint = b'(' + hint + b')\n'
1132 hint = b'(' + hint + b')\n'
1133 ui.warn(hint)
1133 ui.warn(hint)
1134 if slow_path == b'abort':
1134 if slow_path == b'abort':
1135 raise error.Abort(msg, hint=hint)
1135 raise error.Abort(msg, hint=hint)
1136 options[b'persistent-nodemap'] = True
1136 options[b'persistent-nodemap'] = True
1137 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1137 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1138 options[b'persistent-nodemap.mmap'] = True
1138 options[b'persistent-nodemap.mmap'] = True
1139 if ui.configbool(b'devel', b'persistent-nodemap'):
1139 if ui.configbool(b'devel', b'persistent-nodemap'):
1140 options[b'devel-force-nodemap'] = True
1140 options[b'devel-force-nodemap'] = True
1141
1141
1142 return options
1142 return options
1143
1143
1144
1144
def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``.

    Extra keyword arguments are accepted (and ignored) so the factory
    protocol can grow without breaking this implementation.
    """
    # The stock main type; extensions may wrap this factory to substitute
    # their own class.
    return localrepository
1148
1148
1149
1149
@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        """Return the filelog for ``path``.

        A leading ``/`` is stripped so callers may pass either an
        absolute-style or relative store path.
        """
        relpath = path[1:] if path.startswith(b'/') else path
        return filelog.filelog(self.svfs, relpath)
1159
1159
1160
1160
@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        """Return a narrow-aware filelog for ``path``.

        A leading ``/`` is stripped before the lookup, mirroring
        ``revlogfilestorage.file``.
        """
        relpath = path[1:] if path.startswith(b'/') else path
        return filelog.narrowfilelog(self.svfs, relpath, self._storenarrowmatch)
1170
1170
1171
1171
def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``.

    ``features`` is updated in place to advertise revlog-backed file
    storage and stream-clone support.  The returned class depends on
    whether the repository uses narrow file storage.
    """
    features.update(
        {
            repository.REPO_FEATURE_REVLOG_FILE_STORAGE,
            repository.REPO_FEATURE_STREAM_CLONE,
        }
    )

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    return revlogfilestorage
1181
1181
1182
1182
# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]
1192
1192
1193
1193
1194 @interfaceutil.implementer(repository.ilocalrepositorymain)
1194 @interfaceutil.implementer(repository.ilocalrepositorymain)
1195 class localrepository(object):
1195 class localrepository(object):
1196 """Main class for representing local repositories.
1196 """Main class for representing local repositories.
1197
1197
1198 All local repositories are instances of this class.
1198 All local repositories are instances of this class.
1199
1199
1200 Constructed on its own, instances of this class are not usable as
1200 Constructed on its own, instances of this class are not usable as
1201 repository objects. To obtain a usable repository object, call
1201 repository objects. To obtain a usable repository object, call
1202 ``hg.repository()``, ``localrepo.instance()``, or
1202 ``hg.repository()``, ``localrepo.instance()``, or
1203 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1203 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1204 ``instance()`` adds support for creating new repositories.
1204 ``instance()`` adds support for creating new repositories.
1205 ``hg.repository()`` adds more extension integration, including calling
1205 ``hg.repository()`` adds more extension integration, including calling
1206 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1206 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1207 used.
1207 used.
1208 """
1208 """
1209
1209
    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.

    # On-disk storage format requirements this class knows how to open.
    supportedformats = {
        requirementsmod.REVLOGV1_REQUIREMENT,
        requirementsmod.GENERALDELTA_REQUIREMENT,
        requirementsmod.TREEMANIFEST_REQUIREMENT,
        requirementsmod.COPIESSDC_REQUIREMENT,
        requirementsmod.REVLOGV2_REQUIREMENT,
        requirementsmod.SIDEDATA_REQUIREMENT,
        requirementsmod.SPARSEREVLOG_REQUIREMENT,
        requirementsmod.NODEMAP_REQUIREMENT,
        bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
        requirementsmod.SHARESAFE_REQUIREMENT,
    }
    # Full set of requirements supported when opening a repository:
    # the storage formats above plus non-format (behavioral) requirements.
    _basesupported = supportedformats | {
        requirementsmod.STORE_REQUIREMENT,
        requirementsmod.FNCACHE_REQUIREMENT,
        requirementsmod.SHARED_REQUIREMENT,
        requirementsmod.RELATIVE_SHARED_REQUIREMENT,
        requirementsmod.DOTENCODE_REQUIREMENT,
        requirementsmod.SPARSE_REQUIREMENT,
        requirementsmod.INTERNAL_PHASE_REQUIREMENT,
    }

    # list of prefix for file which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assume
        # wlock is not needed so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX shouldn't be dirstate covered by the wlock?
        b'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }
1256
1256
    def __init__(
        self,
        baseui,
        ui,
        origroot,
        wdirvfs,
        hgvfs,
        requirements,
        supportedrequirements,
        sharedpath,
        store,
        cachevfs,
        wcachevfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a supetset of ``requirements``.

        sharedpath
           ``bytes`` Defining path to storage base directory. Points to a
           ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.nodeconstants = sha1nodeconstants
        self.nullid = self.nodeconstants.nullid
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        # Wrap the vfs audit hook with a lock-usage checker when the
        # relevant devel warnings are enabled.
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'

        # Sidedata categories wanted on exchange, and the registered
        # computers able to produce them (filled in by metadatamod).
        self._wanted_sidedata = set()
        self._sidedata_computers = {}
        metadatamod.set_sidedata_spec_for_repo(self)
1411
1411
    def _getvfsward(self, origfunc):
        """build a ward for self.vfs

        Returns a wrapper around ``origfunc`` (the vfs audit hook) that
        emits devel warnings when files under .hg/ are written without
        holding the appropriate lock.
        """
        # Weak reference so the ward does not keep the repo alive.
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            # Bail out if the repo is gone or not fully initialized yet.
            if (
                repo is None
                or not util.safehasattr(repo, b'_wlockref')
                or not util.safehasattr(repo, b'_lockref')
            ):
                return
            # Read-only access never needs a lock.
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            # path prefixes covered by 'lock'
            vfs_path_prefixes = (
                b'journal.',
                b'undo.',
                b'strip-backup/',
                b'cache/',
            )
            if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs
1462
1462
    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs

        Like ``_getvfsward`` but for the store vfs: any write under the
        shared path without the store 'lock' triggers a devel warning.
        """
        # Weak reference so the ward does not keep the repo alive.
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, b'_lockref'):
                return
            # Read-only access never needs a lock.
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs
1484
1484
1485 def close(self):
1485 def close(self):
1486 self._writecaches()
1486 self._writecaches()
1487
1487
1488 def _writecaches(self):
1488 def _writecaches(self):
1489 if self._revbranchcache:
1489 if self._revbranchcache:
1490 self._revbranchcache.write()
1490 self._revbranchcache.write()
1491
1491
1492 def _restrictcapabilities(self, caps):
1492 def _restrictcapabilities(self, caps):
1493 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1493 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1494 caps = set(caps)
1494 caps = set(caps)
1495 capsblob = bundle2.encodecaps(
1495 capsblob = bundle2.encodecaps(
1496 bundle2.getrepocaps(self, role=b'client')
1496 bundle2.getrepocaps(self, role=b'client')
1497 )
1497 )
1498 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1498 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1499 if self.ui.configbool(b'experimental', b'narrow'):
1499 if self.ui.configbool(b'experimental', b'narrow'):
1500 caps.add(wireprototypes.NARROWCAP)
1500 caps.add(wireprototypes.NARROWCAP)
1501 return caps
1501 return caps
1502
1502
1503 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1503 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1504 # self -> auditor -> self._checknested -> self
1504 # self -> auditor -> self._checknested -> self
1505
1505
1506 @property
1506 @property
1507 def auditor(self):
1507 def auditor(self):
1508 # This is only used by context.workingctx.match in order to
1508 # This is only used by context.workingctx.match in order to
1509 # detect files in subrepos.
1509 # detect files in subrepos.
1510 return pathutil.pathauditor(self.root, callback=self._checknested)
1510 return pathutil.pathauditor(self.root, callback=self._checknested)
1511
1511
@property
def nofsauditor(self):
    """A cached, non-filesystem path auditor rooted at the repo.

    Only used by context.basectx.match to detect files inside subrepos;
    deliberately not cached on the repo itself (reference-cycle note
    above).
    """
    return pathutil.pathauditor(
        self.root, callback=self._checknested, realfs=False, cached=True
    )
1519
1519
def _checknested(self, path):
    """Determine if path is a legal nested repository.

    ``path`` is expected to be an absolute path below ``self.root``
    (anything else returns False immediately).  Walks the path's
    ancestors against the working copy's subrepo state and delegates
    to the matching subrepo when one is found.
    """
    if not path.startswith(self.root):
        return False
    # path relative to the repo root (filesystem separators)
    subpath = path[len(self.root) + 1 :]
    # same path with '/' separators, as used by substate keys
    normsubpath = util.pconvert(subpath)

    # XXX: Checking against the current working copy is wrong in
    # the sense that it can reject things like
    #
    #   $ hg cat -r 10 sub/x.txt
    #
    # if sub/ is no longer a subrepository in the working copy
    # parent revision.
    #
    # However, it can of course also allow things that would have
    # been rejected before, such as the above cat command if sub/
    # is a subrepository now, but was a normal directory before.
    # The old path auditor would have rejected by mistake since it
    # panics when it sees sub/.hg/.
    #
    # All in all, checking against the working copy seems sensible
    # since we want to prevent access to nested repositories on
    # the filesystem *now*.
    ctx = self[None]
    parts = util.splitpath(subpath)
    # walk from the deepest prefix up towards the root, looking for
    # the innermost enclosing subrepo
    while parts:
        prefix = b'/'.join(parts)
        if prefix in ctx.substate:
            if prefix == normsubpath:
                # the path *is* a declared subrepo: legal
                return True
            else:
                # the path is inside a declared subrepo: let that
                # subrepo decide about the remainder
                sub = ctx.sub(prefix)
                return sub.checknested(subpath[len(prefix) + 1 :])
        else:
            parts.pop()
    return False
1557
1557
def peer(self):
    """Return a localpeer view of this repository.

    Never cached: caching it would create a reference cycle between
    the repo and the peer.
    """
    return localpeer(self)
1560
1560
def unfiltered(self):
    """Return the unfiltered version of the repository.

    The base implementation is simply the identity; filtered repo
    (repoview) subclasses override this to strip their filtering.
    """
    return self
1566
1566
def filtered(self, name, visibilityexceptions=None):
    """Return a filtered version of this repository.

    ``name`` identifies the requested view; the result is a repoview
    set "exactly" to that view.  Filtering is never applied
    recursively: ``repo.filtered("served")`` always yields the
    "served" view no matter which view ``repo`` currently uses, so
    there is only ever one level of repoview "filtering".
    """
    extra = self._extrafilterid
    if extra is not None and b'%' not in name:
        name = b'%'.join((name, extra))

    viewclass = repoview.newtype(self.unfiltered().__class__)
    return viewclass(self, name, visibilityexceptions)
1584
1584
@mixedrepostorecache(
    (b'bookmarks', b'plain'),
    (b'bookmarks.current', b'plain'),
    (b'bookmarks', b''),
    (b'00changelog.i', b''),
)
def _bookmarks(self):
    """Return the bookmark store for this repository.

    The changelog is explicitly refreshed first; the comment below
    explains why that is required for a race-free consistent read.
    """
    # Since the multiple files involved in the transaction cannot be
    # written atomically (with current repository format), there is a race
    # condition here.
    #
    # 1) changelog content A is read
    # 2) outside transaction update changelog to content B
    # 3) outside transaction update bookmark file referring to content B
    # 4) bookmarks file content is read and filtered against changelog-A
    #
    # When this happens, bookmarks against nodes missing from A are dropped.
    #
    # Having this happening during read is not great, but it become worse
    # when this happen during write because the bookmarks to the "unknown"
    # nodes will be dropped for good. However, writes happen within locks.
    # This locking makes it possible to have a race free consistent read.
    # For this purpose data read from disc before locking are
    # "invalidated" right after the locks are taken. This invalidations are
    # "light", the `filecache` mechanism keep the data in memory and will
    # reuse them if the underlying files did not changed. Not parsing the
    # same data multiple times helps performances.
    #
    # Unfortunately in the case describe above, the files tracked by the
    # bookmarks file cache might not have changed, but the in-memory
    # content is still "wrong" because we used an older changelog content
    # to process the on-disk data. So after locking, the changelog would be
    # refreshed but `_bookmarks` would be preserved.
    # Adding `00changelog.i` to the list of tracked file is not
    # enough, because at the time we build the content for `_bookmarks` in
    # (4), the changelog file has already diverged from the content used
    # for loading `changelog` in (1)
    #
    # To prevent the issue, we force the changelog to be explicitly
    # reloaded while computing `_bookmarks`. The data race can still happen
    # without the lock (with a narrower window), but it would no longer go
    # undetected during the lock time refresh.
    #
    # The new schedule is as follow
    #
    # 1) filecache logic detect that `_bookmarks` needs to be computed
    # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
    # 3) We force `changelog` filecache to be tested
    # 4) cachestat for `changelog` are captured (for changelog)
    # 5) `_bookmarks` is computed and cached
    #
    # The step in (3) ensure we have a changelog at least as recent as the
    # cache stat computed in (1). As a result at locking time:
    #  * if the changelog did not changed since (1) -> we can reuse the data
    #  * otherwise -> the bookmarks get refreshed.
    self._refreshchangelog()
    return bookmarks.bmstore(self)
1642
1642
def _refreshchangelog(self):
    """Make sure the in-memory changelog matches the on-disk one.

    Drops the cached ``changelog`` attribute so the next access
    reloads it from disk; left untouched while a transaction is open.
    """
    if 'changelog' not in vars(self):
        return
    if self.currenttransaction() is not None:
        return
    del self.changelog
1647
1647
@property
def _activebookmark(self):
    """The currently active bookmark, as reported by the bookmark store."""
    return self._bookmarks.active
1651
1651
# _phasesets depends on the changelog: ideally _phasecache.invalidate()
# would run whenever '00changelog.i' changes, but that can't be easily
# expressed through the filecache mechanism, so the changelog file is
# simply listed as a second dependency here.
@storecache(b'phaseroots', b'00changelog.i')
def _phasecache(self):
    """Return the phase cache for this repository."""
    return phases.phasecache(self, self._phasedefaults)
1658
1658
@storecache(b'obsstore')
def obsstore(self):
    """Return the obsolescence-marker store for this repository."""
    return obsolete.makestore(self.ui, self)
1662
1662
@storecache(b'00changelog.i')
def changelog(self):
    """Return the changelog, loading it from the store on first access."""
    # Touch the dirstate before the changelog: loading them in the
    # other order opens a race window (see issue6303).
    self.dirstate.prefetch_parents()
    checker = revlogchecker.get_checker(self.ui, b'changelog')
    return self.store.changelog(
        txnutil.mayhavepending(self.root),
        concurrencychecker=checker,
    )
1671
1671
@storecache(b'00manifest.i')
def manifestlog(self):
    """Return the manifest log backed by this repository's store."""
    return self.store.manifestlog(self, self._storenarrowmatch)
1675
1675
@repofilecache(b'dirstate')
def dirstate(self):
    """Return the dirstate, built through the ``_makedirstate`` hook."""
    return self._makedirstate()
1679
1679
def _makedirstate(self):
    """Build the dirstate object; extension point for per-repo wrapping."""

    def sparse_matcher():
        # evaluated lazily so the sparse config is read when needed
        return sparse.matcher(self)

    return dirstate.dirstate(
        self.vfs,
        self.ui,
        self.root,
        self._dirstatevalidate,
        sparse_matcher,
        self.nodeconstants,
    )
1692
1692
def _dirstatevalidate(self, node):
    """Validate a node recorded as a working-directory parent.

    Returns ``node`` unchanged when the changelog knows it; otherwise
    warns once per process and returns the null node.
    """
    try:
        self.changelog.rev(node)
    except error.LookupError:
        if not self._dirstatevalidatewarned:
            # warn only once, even if both parents are bogus
            self._dirstatevalidatewarned = True
            msg = _(b"warning: ignoring unknown working parent %s!\n")
            self.ui.warn(msg % short(node))
        return self.nullid
    else:
        return node
1705
1705
@storecache(narrowspec.FILENAME)
def narrowpats(self):
    """Matcher patterns for this repository's narrowspec.

    Returns a tuple of (includes, excludes).
    """
    return narrowspec.load(self)
1713
1713
@storecache(narrowspec.FILENAME)
def _storenarrowmatch(self):
    """Matcher for the store's narrowspec; always() for non-narrow repos."""
    if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
        return matchmod.always()
    includes, excludes = self.narrowpats
    return narrowspec.match(self.root, include=includes, exclude=excludes)
1720
1720
@storecache(narrowspec.FILENAME)
def _narrowmatch(self):
    """Matcher for the working copy's narrowspec.

    Unlike ``_storenarrowmatch`` this also verifies that the working
    copy's narrowspec is consistent before building the matcher.
    """
    if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
        return matchmod.always()
    narrowspec.checkworkingcopynarrowspec(self)
    includes, excludes = self.narrowpats
    return narrowspec.match(self.root, include=includes, exclude=excludes)
1728
1728
def narrowmatch(self, match=None, includeexact=False):
    """Matcher corresponding to the repo's narrowspec.

    If ``match`` is given, it is intersected with the narrow matcher.
    If ``includeexact`` is True, exact matches from ``match`` are kept
    even when they fall outside the narrowspec.
    """
    narrow = self._narrowmatch
    if not match:
        return narrow
    if includeexact and not narrow.always():
        # do not exclude explicitly-specified paths so that they can
        # be warned about later on
        exact = matchmod.exact(match.files())
        narrow = matchmod.unionmatcher([narrow, exact])
    return matchmod.intersectmatchers(match, narrow)
1747
1747
def setnarrowpats(self, newincludes, newexcludes):
    """Persist a new narrowspec and drop every cache derived from it."""
    narrowspec.save(self, newincludes, newexcludes)
    self.invalidate(clearfilecache=True)
1751
1751
@unfilteredpropertycache
def _quick_access_changeid_null(self):
    """Fast-path lookup table for the various spellings of null.

    Maps b'null', the null revision number and the null node to their
    (rev, node) pair.
    """
    null_pair = (nullrev, self.nodeconstants.nullid)
    return {
        b'null': null_pair,
        nullrev: null_pair,
        self.nullid: (nullrev, self.nullid),
    }
1759
1759
@unfilteredpropertycache
def _quick_access_changeid_wc(self):
    """Fast-path lookup table including the working-copy parents.

    Extends the null-revision table with (rev, node) entries for the
    dirstate parents and their own parents, keyed by both rev and
    node, plus a b'.' alias for p1.  Only meaningful for repo filters
    that guarantee the working copy is visible.
    """
    # also fast path access to the working copy parents
    # however, only do it for filter that ensure wc is visible.
    quick = self._quick_access_changeid_null.copy()
    cl = self.unfiltered().changelog
    for node in self.dirstate.parents():
        if node == self.nullid:
            continue
        rev = cl.index.get_rev(node)
        if rev is None:
            # unknown working copy parent case:
            #
            #   skip the fast path and let higher code deal with it
            continue
        pair = (rev, node)
        quick[rev] = pair
        quick[node] = pair
        # also add the parents of the parents
        for r in cl.parentrevs(rev):
            if r == nullrev:
                continue
            n = cl.node(r)
            pair = (r, n)
            quick[r] = pair
            quick[n] = pair
    p1node = self.dirstate.p1()
    if p1node != self.nullid:
        # b'.' resolves to the first working-copy parent
        quick[b'.'] = quick[p1node]
    return quick
1790
1790
@unfilteredmethod
def _quick_access_changeid_invalidate(self):
    """Drop the cached working-copy fast-path table, if present."""
    self.__dict__.pop('_quick_access_changeid_wc', None)
1795
1795
@property
def _quick_access_changeid(self):
    """A helper dictionary for ``__getitem__`` calls.

    Contains the symbols we can recognise right away without further
    processing: the null spellings, plus — for filters that keep the
    working copy visible — the working-copy parents.
    """
    if self.filtername in repoview.filter_has_wc:
        return self._quick_access_changeid_wc
    return self._quick_access_changeid_null
1806
1806
def __getitem__(self, changeid):
    """Return the context object for ``changeid``.

    ``changeid`` may be None (working context), an existing basectx
    (returned as-is), a slice of revisions, an integer revision, one
    of the quick-access symbols (null spellings, possibly b'.'),
    b'tip', a binary node, or a full hex node string.  Raises
    FilteredRepoLookupError for filtered revisions and RepoLookupError
    for unknown ones.
    """
    # dealing with special cases
    if changeid is None:
        return context.workingctx(self)
    if isinstance(changeid, context.basectx):
        return changeid

    # dealing with multiple revisions
    if isinstance(changeid, slice):
        # wdirrev isn't contiguous so the slice shouldn't include it
        return [
            self[i]
            for i in pycompat.xrange(*changeid.indices(len(self)))
            if i not in self.changelog.filteredrevs
        ]

    # dealing with some special values
    quick_access = self._quick_access_changeid.get(changeid)
    if quick_access is not None:
        rev, node = quick_access
        return context.changectx(self, rev, node, maybe_filtered=False)
    if changeid == b'tip':
        node = self.changelog.tip()
        rev = self.changelog.rev(node)
        return context.changectx(self, rev, node)

    # dealing with arbitrary values
    try:
        if isinstance(changeid, int):
            node = self.changelog.node(changeid)
            rev = changeid
        elif changeid == b'.':
            # this is a hack to delay/avoid loading obsmarkers
            # when we know that '.' won't be hidden
            node = self.dirstate.p1()
            rev = self.unfiltered().changelog.rev(node)
        elif len(changeid) == self.nodeconstants.nodelen:
            # looks like a binary node
            try:
                node = changeid
                rev = self.changelog.rev(changeid)
            except error.FilteredLookupError:
                changeid = hex(changeid)  # for the error message
                raise
            except LookupError:
                # check if it might have come from damaged dirstate
                #
                # XXX we could avoid the unfiltered if we had a recognizable
                # exception for filtered changeset access
                if (
                    self.local()
                    and changeid in self.unfiltered().dirstate.parents()
                ):
                    msg = _(b"working directory has unknown parent '%s'!")
                    raise error.Abort(msg % short(changeid))
                changeid = hex(changeid)  # for the error message
                raise

        elif len(changeid) == 2 * self.nodeconstants.nodelen:
            # looks like a hex node string
            node = bin(changeid)
            rev = self.changelog.rev(node)
        else:
            raise error.ProgrammingError(
                b"unsupported changeid '%s' of type %s"
                % (changeid, pycompat.bytestr(type(changeid)))
            )

        return context.changectx(self, rev, node)

    except (error.FilteredIndexError, error.FilteredLookupError):
        raise error.FilteredRepoLookupError(
            _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
        )
    except (IndexError, LookupError):
        raise error.RepoLookupError(
            _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
        )
    except error.WdirUnsupported:
        return context.workingctx(self)
1885
1885
def __contains__(self, changeid):
    """True if the given changeid exists in this repository."""
    try:
        self[changeid]
    except error.RepoLookupError:
        return False
    return True
1893
1893
def __nonzero__(self):
    """Repositories are always truthy, even when they have no changesets."""
    return True

__bool__ = __nonzero__
1898
1898
def __len__(self):
    """Number of changesets in the repository.

    Goes through the unfiltered repo to avoid paying the cost of
    repoview.changelog.
    """
    return len(self.unfiltered().changelog)
1903
1903
def __iter__(self):
    """Iterate over the repository's revision numbers."""
    return iter(self.changelog)
1906
1906
def revs(self, expr, *args):
    """Find revisions matching a revset.

    ``expr`` is a revset string that may contain %-formatting to
    escape certain types (see ``revsetlang.formatspec``).  Revset
    aliases from the configuration are not expanded; to expand user
    aliases, consider calling ``scmutil.revrange()`` or
    ``repo.anyrevs([expr], user=True)``.

    Returns a smartset.abstractsmartset, a list-like interface of
    integer revisions.
    """
    spec = revsetlang.spectree(expr, *args)
    matcher = revset.makematcher(spec)
    return matcher(self)
1922
1922
def set(self, expr, *args):
    """Find revisions matching a revset and emit changectx instances.

    A convenience generator wrapping ``revs()``.  Revset aliases from
    the configuration are not expanded; to expand user aliases,
    consider calling ``scmutil.revrange()``.
    """
    for rev in self.revs(expr, *args):
        yield self[rev]
1934
1934
def anyrevs(self, specs, user=False, localalias=None):
    """Find revisions matching one of the given revsets.

    Revset aliases from the configuration are not expanded by
    default; pass ``user=True`` to expand them.  ``localalias`` may
    be a ``{name: definitionstring}`` mapping of local definitions
    overriding user aliases.
    """
    # cheap fast paths for the two most common single-spec queries
    if specs == [b'null']:
        return revset.baseset([nullrev])
    if specs == [b'.']:
        quick_data = self._quick_access_changeid.get(b'.')
        if quick_data is not None:
            return revset.baseset([quick_data[0]])
    if user:
        matcher = revset.matchany(
            self.ui,
            specs,
            lookup=revset.lookupfn(self),
            localalias=localalias,
        )
    else:
        matcher = revset.matchany(None, specs, localalias=localalias)
    return matcher(self)
1959
1959
def url(self):
    """URL of this repository, always using the ``file:`` scheme."""
    return b'file:' + self.root
1962
1962
def hook(self, name, throw=False, **args):
    """Call a hook, passing this repo instance.

    A convenience wrapper around ``hook.hook``; extensions are only
    expected to call it when they have registered a custom hook or
    are replacing code that is expected to fire one.
    """
    return hook.hook(self.ui, self, name, throw, **args)
1971
1971
1972 @filteredpropertycache
1972 @filteredpropertycache
1973 def _tagscache(self):
1973 def _tagscache(self):
1974 """Returns a tagscache object that contains various tags related
1974 """Returns a tagscache object that contains various tags related
1975 caches."""
1975 caches."""
1976
1976
1977 # This simplifies its cache management by having one decorated
1977 # This simplifies its cache management by having one decorated
1978 # function (this one) and the rest simply fetch things from it.
1978 # function (this one) and the rest simply fetch things from it.
1979 class tagscache(object):
1979 class tagscache(object):
1980 def __init__(self):
1980 def __init__(self):
1981 # These two define the set of tags for this repository. tags
1981 # These two define the set of tags for this repository. tags
1982 # maps tag name to node; tagtypes maps tag name to 'global' or
1982 # maps tag name to node; tagtypes maps tag name to 'global' or
1983 # 'local'. (Global tags are defined by .hgtags across all
1983 # 'local'. (Global tags are defined by .hgtags across all
1984 # heads, and local tags are defined in .hg/localtags.)
1984 # heads, and local tags are defined in .hg/localtags.)
1985 # They constitute the in-memory cache of tags.
1985 # They constitute the in-memory cache of tags.
1986 self.tags = self.tagtypes = None
1986 self.tags = self.tagtypes = None
1987
1987
1988 self.nodetagscache = self.tagslist = None
1988 self.nodetagscache = self.tagslist = None
1989
1989
1990 cache = tagscache()
1990 cache = tagscache()
1991 cache.tags, cache.tagtypes = self._findtags()
1991 cache.tags, cache.tagtypes = self._findtags()
1992
1992
1993 return cache
1993 return cache
1994
1994
1995 def tags(self):
1995 def tags(self):
1996 '''return a mapping of tag to node'''
1996 '''return a mapping of tag to node'''
1997 t = {}
1997 t = {}
1998 if self.changelog.filteredrevs:
1998 if self.changelog.filteredrevs:
1999 tags, tt = self._findtags()
1999 tags, tt = self._findtags()
2000 else:
2000 else:
2001 tags = self._tagscache.tags
2001 tags = self._tagscache.tags
2002 rev = self.changelog.rev
2002 rev = self.changelog.rev
2003 for k, v in pycompat.iteritems(tags):
2003 for k, v in pycompat.iteritems(tags):
2004 try:
2004 try:
2005 # ignore tags to unknown nodes
2005 # ignore tags to unknown nodes
2006 rev(v)
2006 rev(v)
2007 t[k] = v
2007 t[k] = v
2008 except (error.LookupError, ValueError):
2008 except (error.LookupError, ValueError):
2009 pass
2009 pass
2010 return t
2010 return t
2011
2011
2012 def _findtags(self):
2012 def _findtags(self):
2013 """Do the hard work of finding tags. Return a pair of dicts
2013 """Do the hard work of finding tags. Return a pair of dicts
2014 (tags, tagtypes) where tags maps tag name to node, and tagtypes
2014 (tags, tagtypes) where tags maps tag name to node, and tagtypes
2015 maps tag name to a string like \'global\' or \'local\'.
2015 maps tag name to a string like \'global\' or \'local\'.
2016 Subclasses or extensions are free to add their own tags, but
2016 Subclasses or extensions are free to add their own tags, but
2017 should be aware that the returned dicts will be retained for the
2017 should be aware that the returned dicts will be retained for the
2018 duration of the localrepo object."""
2018 duration of the localrepo object."""
2019
2019
2020 # XXX what tagtype should subclasses/extensions use? Currently
2020 # XXX what tagtype should subclasses/extensions use? Currently
2021 # mq and bookmarks add tags, but do not set the tagtype at all.
2021 # mq and bookmarks add tags, but do not set the tagtype at all.
2022 # Should each extension invent its own tag type? Should there
2022 # Should each extension invent its own tag type? Should there
2023 # be one tagtype for all such "virtual" tags? Or is the status
2023 # be one tagtype for all such "virtual" tags? Or is the status
2024 # quo fine?
2024 # quo fine?
2025
2025
2026 # map tag name to (node, hist)
2026 # map tag name to (node, hist)
2027 alltags = tagsmod.findglobaltags(self.ui, self)
2027 alltags = tagsmod.findglobaltags(self.ui, self)
2028 # map tag name to tag type
2028 # map tag name to tag type
2029 tagtypes = {tag: b'global' for tag in alltags}
2029 tagtypes = {tag: b'global' for tag in alltags}
2030
2030
2031 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
2031 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
2032
2032
2033 # Build the return dicts. Have to re-encode tag names because
2033 # Build the return dicts. Have to re-encode tag names because
2034 # the tags module always uses UTF-8 (in order not to lose info
2034 # the tags module always uses UTF-8 (in order not to lose info
2035 # writing to the cache), but the rest of Mercurial wants them in
2035 # writing to the cache), but the rest of Mercurial wants them in
2036 # local encoding.
2036 # local encoding.
2037 tags = {}
2037 tags = {}
2038 for (name, (node, hist)) in pycompat.iteritems(alltags):
2038 for (name, (node, hist)) in pycompat.iteritems(alltags):
2039 if node != self.nullid:
2039 if node != self.nullid:
2040 tags[encoding.tolocal(name)] = node
2040 tags[encoding.tolocal(name)] = node
2041 tags[b'tip'] = self.changelog.tip()
2041 tags[b'tip'] = self.changelog.tip()
2042 tagtypes = {
2042 tagtypes = {
2043 encoding.tolocal(name): value
2043 encoding.tolocal(name): value
2044 for (name, value) in pycompat.iteritems(tagtypes)
2044 for (name, value) in pycompat.iteritems(tagtypes)
2045 }
2045 }
2046 return (tags, tagtypes)
2046 return (tags, tagtypes)
2047
2047
2048 def tagtype(self, tagname):
2048 def tagtype(self, tagname):
2049 """
2049 """
2050 return the type of the given tag. result can be:
2050 return the type of the given tag. result can be:
2051
2051
2052 'local' : a local tag
2052 'local' : a local tag
2053 'global' : a global tag
2053 'global' : a global tag
2054 None : tag does not exist
2054 None : tag does not exist
2055 """
2055 """
2056
2056
2057 return self._tagscache.tagtypes.get(tagname)
2057 return self._tagscache.tagtypes.get(tagname)
2058
2058
2059 def tagslist(self):
2059 def tagslist(self):
2060 '''return a list of tags ordered by revision'''
2060 '''return a list of tags ordered by revision'''
2061 if not self._tagscache.tagslist:
2061 if not self._tagscache.tagslist:
2062 l = []
2062 l = []
2063 for t, n in pycompat.iteritems(self.tags()):
2063 for t, n in pycompat.iteritems(self.tags()):
2064 l.append((self.changelog.rev(n), t, n))
2064 l.append((self.changelog.rev(n), t, n))
2065 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
2065 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
2066
2066
2067 return self._tagscache.tagslist
2067 return self._tagscache.tagslist
2068
2068
2069 def nodetags(self, node):
2069 def nodetags(self, node):
2070 '''return the tags associated with a node'''
2070 '''return the tags associated with a node'''
2071 if not self._tagscache.nodetagscache:
2071 if not self._tagscache.nodetagscache:
2072 nodetagscache = {}
2072 nodetagscache = {}
2073 for t, n in pycompat.iteritems(self._tagscache.tags):
2073 for t, n in pycompat.iteritems(self._tagscache.tags):
2074 nodetagscache.setdefault(n, []).append(t)
2074 nodetagscache.setdefault(n, []).append(t)
2075 for tags in pycompat.itervalues(nodetagscache):
2075 for tags in pycompat.itervalues(nodetagscache):
2076 tags.sort()
2076 tags.sort()
2077 self._tagscache.nodetagscache = nodetagscache
2077 self._tagscache.nodetagscache = nodetagscache
2078 return self._tagscache.nodetagscache.get(node, [])
2078 return self._tagscache.nodetagscache.get(node, [])
2079
2079
2080 def nodebookmarks(self, node):
2080 def nodebookmarks(self, node):
2081 """return the list of bookmarks pointing to the specified node"""
2081 """return the list of bookmarks pointing to the specified node"""
2082 return self._bookmarks.names(node)
2082 return self._bookmarks.names(node)
2083
2083
2084 def branchmap(self):
2084 def branchmap(self):
2085 """returns a dictionary {branch: [branchheads]} with branchheads
2085 """returns a dictionary {branch: [branchheads]} with branchheads
2086 ordered by increasing revision number"""
2086 ordered by increasing revision number"""
2087 return self._branchcaches[self]
2087 return self._branchcaches[self]
2088
2088
2089 @unfilteredmethod
2089 @unfilteredmethod
2090 def revbranchcache(self):
2090 def revbranchcache(self):
2091 if not self._revbranchcache:
2091 if not self._revbranchcache:
2092 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
2092 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
2093 return self._revbranchcache
2093 return self._revbranchcache
2094
2094
2095 def register_changeset(self, rev, changelogrevision):
2095 def register_changeset(self, rev, changelogrevision):
2096 self.revbranchcache().setdata(rev, changelogrevision)
2096 self.revbranchcache().setdata(rev, changelogrevision)
2097
2097
2098 def branchtip(self, branch, ignoremissing=False):
2098 def branchtip(self, branch, ignoremissing=False):
2099 """return the tip node for a given branch
2099 """return the tip node for a given branch
2100
2100
2101 If ignoremissing is True, then this method will not raise an error.
2101 If ignoremissing is True, then this method will not raise an error.
2102 This is helpful for callers that only expect None for a missing branch
2102 This is helpful for callers that only expect None for a missing branch
2103 (e.g. namespace).
2103 (e.g. namespace).
2104
2104
2105 """
2105 """
2106 try:
2106 try:
2107 return self.branchmap().branchtip(branch)
2107 return self.branchmap().branchtip(branch)
2108 except KeyError:
2108 except KeyError:
2109 if not ignoremissing:
2109 if not ignoremissing:
2110 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
2110 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
2111 else:
2111 else:
2112 pass
2112 pass
2113
2113
2114 def lookup(self, key):
2114 def lookup(self, key):
2115 node = scmutil.revsymbol(self, key).node()
2115 node = scmutil.revsymbol(self, key).node()
2116 if node is None:
2116 if node is None:
2117 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
2117 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
2118 return node
2118 return node
2119
2119
2120 def lookupbranch(self, key):
2120 def lookupbranch(self, key):
2121 if self.branchmap().hasbranch(key):
2121 if self.branchmap().hasbranch(key):
2122 return key
2122 return key
2123
2123
2124 return scmutil.revsymbol(self, key).branch()
2124 return scmutil.revsymbol(self, key).branch()
2125
2125
2126 def known(self, nodes):
2126 def known(self, nodes):
2127 cl = self.changelog
2127 cl = self.changelog
2128 get_rev = cl.index.get_rev
2128 get_rev = cl.index.get_rev
2129 filtered = cl.filteredrevs
2129 filtered = cl.filteredrevs
2130 result = []
2130 result = []
2131 for n in nodes:
2131 for n in nodes:
2132 r = get_rev(n)
2132 r = get_rev(n)
2133 resp = not (r is None or r in filtered)
2133 resp = not (r is None or r in filtered)
2134 result.append(resp)
2134 result.append(resp)
2135 return result
2135 return result
2136
2136
2137 def local(self):
2137 def local(self):
2138 return self
2138 return self
2139
2139
2140 def publishing(self):
2140 def publishing(self):
2141 # it's safe (and desirable) to trust the publish flag unconditionally
2141 # it's safe (and desirable) to trust the publish flag unconditionally
2142 # so that we don't finalize changes shared between users via ssh or nfs
2142 # so that we don't finalize changes shared between users via ssh or nfs
2143 return self.ui.configbool(b'phases', b'publish', untrusted=True)
2143 return self.ui.configbool(b'phases', b'publish', untrusted=True)
2144
2144
2145 def cancopy(self):
2145 def cancopy(self):
2146 # so statichttprepo's override of local() works
2146 # so statichttprepo's override of local() works
2147 if not self.local():
2147 if not self.local():
2148 return False
2148 return False
2149 if not self.publishing():
2149 if not self.publishing():
2150 return True
2150 return True
2151 # if publishing we can't copy if there is filtered content
2151 # if publishing we can't copy if there is filtered content
2152 return not self.filtered(b'visible').changelog.filteredrevs
2152 return not self.filtered(b'visible').changelog.filteredrevs
2153
2153
2154 def shared(self):
2154 def shared(self):
2155 '''the type of shared repository (None if not shared)'''
2155 '''the type of shared repository (None if not shared)'''
2156 if self.sharedpath != self.path:
2156 if self.sharedpath != self.path:
2157 return b'store'
2157 return b'store'
2158 return None
2158 return None
2159
2159
2160 def wjoin(self, f, *insidef):
2160 def wjoin(self, f, *insidef):
2161 return self.vfs.reljoin(self.root, f, *insidef)
2161 return self.vfs.reljoin(self.root, f, *insidef)
2162
2162
2163 def setparents(self, p1, p2=None):
2163 def setparents(self, p1, p2=None):
2164 if p2 is None:
2164 if p2 is None:
2165 p2 = self.nullid
2165 p2 = self.nullid
2166 self[None].setparents(p1, p2)
2166 self[None].setparents(p1, p2)
2167 self._quick_access_changeid_invalidate()
2167 self._quick_access_changeid_invalidate()
2168
2168
2169 def filectx(self, path, changeid=None, fileid=None, changectx=None):
2169 def filectx(self, path, changeid=None, fileid=None, changectx=None):
2170 """changeid must be a changeset revision, if specified.
2170 """changeid must be a changeset revision, if specified.
2171 fileid can be a file revision or node."""
2171 fileid can be a file revision or node."""
2172 return context.filectx(
2172 return context.filectx(
2173 self, path, changeid, fileid, changectx=changectx
2173 self, path, changeid, fileid, changectx=changectx
2174 )
2174 )
2175
2175
2176 def getcwd(self):
2176 def getcwd(self):
2177 return self.dirstate.getcwd()
2177 return self.dirstate.getcwd()
2178
2178
2179 def pathto(self, f, cwd=None):
2179 def pathto(self, f, cwd=None):
2180 return self.dirstate.pathto(f, cwd)
2180 return self.dirstate.pathto(f, cwd)
2181
2181
2182 def _loadfilter(self, filter):
2182 def _loadfilter(self, filter):
2183 if filter not in self._filterpats:
2183 if filter not in self._filterpats:
2184 l = []
2184 l = []
2185 for pat, cmd in self.ui.configitems(filter):
2185 for pat, cmd in self.ui.configitems(filter):
2186 if cmd == b'!':
2186 if cmd == b'!':
2187 continue
2187 continue
2188 mf = matchmod.match(self.root, b'', [pat])
2188 mf = matchmod.match(self.root, b'', [pat])
2189 fn = None
2189 fn = None
2190 params = cmd
2190 params = cmd
2191 for name, filterfn in pycompat.iteritems(self._datafilters):
2191 for name, filterfn in pycompat.iteritems(self._datafilters):
2192 if cmd.startswith(name):
2192 if cmd.startswith(name):
2193 fn = filterfn
2193 fn = filterfn
2194 params = cmd[len(name) :].lstrip()
2194 params = cmd[len(name) :].lstrip()
2195 break
2195 break
2196 if not fn:
2196 if not fn:
2197 fn = lambda s, c, **kwargs: procutil.filter(s, c)
2197 fn = lambda s, c, **kwargs: procutil.filter(s, c)
2198 fn.__name__ = 'commandfilter'
2198 fn.__name__ = 'commandfilter'
2199 # Wrap old filters not supporting keyword arguments
2199 # Wrap old filters not supporting keyword arguments
2200 if not pycompat.getargspec(fn)[2]:
2200 if not pycompat.getargspec(fn)[2]:
2201 oldfn = fn
2201 oldfn = fn
2202 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
2202 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
2203 fn.__name__ = 'compat-' + oldfn.__name__
2203 fn.__name__ = 'compat-' + oldfn.__name__
2204 l.append((mf, fn, params))
2204 l.append((mf, fn, params))
2205 self._filterpats[filter] = l
2205 self._filterpats[filter] = l
2206 return self._filterpats[filter]
2206 return self._filterpats[filter]
2207
2207
2208 def _filter(self, filterpats, filename, data):
2208 def _filter(self, filterpats, filename, data):
2209 for mf, fn, cmd in filterpats:
2209 for mf, fn, cmd in filterpats:
2210 if mf(filename):
2210 if mf(filename):
2211 self.ui.debug(
2211 self.ui.debug(
2212 b"filtering %s through %s\n"
2212 b"filtering %s through %s\n"
2213 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2213 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2214 )
2214 )
2215 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2215 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2216 break
2216 break
2217
2217
2218 return data
2218 return data
2219
2219
2220 @unfilteredpropertycache
2220 @unfilteredpropertycache
2221 def _encodefilterpats(self):
2221 def _encodefilterpats(self):
2222 return self._loadfilter(b'encode')
2222 return self._loadfilter(b'encode')
2223
2223
2224 @unfilteredpropertycache
2224 @unfilteredpropertycache
2225 def _decodefilterpats(self):
2225 def _decodefilterpats(self):
2226 return self._loadfilter(b'decode')
2226 return self._loadfilter(b'decode')
2227
2227
2228 def adddatafilter(self, name, filter):
2228 def adddatafilter(self, name, filter):
2229 self._datafilters[name] = filter
2229 self._datafilters[name] = filter
2230
2230
2231 def wread(self, filename):
2231 def wread(self, filename):
2232 if self.wvfs.islink(filename):
2232 if self.wvfs.islink(filename):
2233 data = self.wvfs.readlink(filename)
2233 data = self.wvfs.readlink(filename)
2234 else:
2234 else:
2235 data = self.wvfs.read(filename)
2235 data = self.wvfs.read(filename)
2236 return self._filter(self._encodefilterpats, filename, data)
2236 return self._filter(self._encodefilterpats, filename, data)
2237
2237
2238 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2238 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2239 """write ``data`` into ``filename`` in the working directory
2239 """write ``data`` into ``filename`` in the working directory
2240
2240
2241 This returns length of written (maybe decoded) data.
2241 This returns length of written (maybe decoded) data.
2242 """
2242 """
2243 data = self._filter(self._decodefilterpats, filename, data)
2243 data = self._filter(self._decodefilterpats, filename, data)
2244 if b'l' in flags:
2244 if b'l' in flags:
2245 self.wvfs.symlink(data, filename)
2245 self.wvfs.symlink(data, filename)
2246 else:
2246 else:
2247 self.wvfs.write(
2247 self.wvfs.write(
2248 filename, data, backgroundclose=backgroundclose, **kwargs
2248 filename, data, backgroundclose=backgroundclose, **kwargs
2249 )
2249 )
2250 if b'x' in flags:
2250 if b'x' in flags:
2251 self.wvfs.setflags(filename, False, True)
2251 self.wvfs.setflags(filename, False, True)
2252 else:
2252 else:
2253 self.wvfs.setflags(filename, False, False)
2253 self.wvfs.setflags(filename, False, False)
2254 return len(data)
2254 return len(data)
2255
2255
2256 def wwritedata(self, filename, data):
2256 def wwritedata(self, filename, data):
2257 return self._filter(self._decodefilterpats, filename, data)
2257 return self._filter(self._decodefilterpats, filename, data)
2258
2258
2259 def currenttransaction(self):
2259 def currenttransaction(self):
2260 """return the current transaction or None if non exists"""
2260 """return the current transaction or None if non exists"""
2261 if self._transref:
2261 if self._transref:
2262 tr = self._transref()
2262 tr = self._transref()
2263 else:
2263 else:
2264 tr = None
2264 tr = None
2265
2265
2266 if tr and tr.running():
2266 if tr and tr.running():
2267 return tr
2267 return tr
2268 return None
2268 return None
2269
2269
2270 def transaction(self, desc, report=None):
2270 def transaction(self, desc, report=None):
2271 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2271 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2272 b'devel', b'check-locks'
2272 b'devel', b'check-locks'
2273 ):
2273 ):
2274 if self._currentlock(self._lockref) is None:
2274 if self._currentlock(self._lockref) is None:
2275 raise error.ProgrammingError(b'transaction requires locking')
2275 raise error.ProgrammingError(b'transaction requires locking')
2276 tr = self.currenttransaction()
2276 tr = self.currenttransaction()
2277 if tr is not None:
2277 if tr is not None:
2278 return tr.nest(name=desc)
2278 return tr.nest(name=desc)
2279
2279
2280 # abort here if the journal already exists
2280 # abort here if the journal already exists
2281 if self.svfs.exists(b"journal"):
2281 if self.svfs.exists(b"journal"):
2282 raise error.RepoError(
2282 raise error.RepoError(
2283 _(b"abandoned transaction found"),
2283 _(b"abandoned transaction found"),
2284 hint=_(b"run 'hg recover' to clean up transaction"),
2284 hint=_(b"run 'hg recover' to clean up transaction"),
2285 )
2285 )
2286
2286
2287 idbase = b"%.40f#%f" % (random.random(), time.time())
2287 idbase = b"%.40f#%f" % (random.random(), time.time())
2288 ha = hex(hashutil.sha1(idbase).digest())
2288 ha = hex(hashutil.sha1(idbase).digest())
2289 txnid = b'TXN:' + ha
2289 txnid = b'TXN:' + ha
2290 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2290 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2291
2291
2292 self._writejournal(desc)
2292 self._writejournal(desc)
2293 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2293 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2294 if report:
2294 if report:
2295 rp = report
2295 rp = report
2296 else:
2296 else:
2297 rp = self.ui.warn
2297 rp = self.ui.warn
2298 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2298 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2299 # we must avoid cyclic reference between repo and transaction.
2299 # we must avoid cyclic reference between repo and transaction.
2300 reporef = weakref.ref(self)
2300 reporef = weakref.ref(self)
2301 # Code to track tag movement
2301 # Code to track tag movement
2302 #
2302 #
2303 # Since tags are all handled as file content, it is actually quite hard
2303 # Since tags are all handled as file content, it is actually quite hard
2304 # to track these movement from a code perspective. So we fallback to a
2304 # to track these movement from a code perspective. So we fallback to a
2305 # tracking at the repository level. One could envision to track changes
2305 # tracking at the repository level. One could envision to track changes
2306 # to the '.hgtags' file through changegroup apply but that fails to
2306 # to the '.hgtags' file through changegroup apply but that fails to
2307 # cope with case where transaction expose new heads without changegroup
2307 # cope with case where transaction expose new heads without changegroup
2308 # being involved (eg: phase movement).
2308 # being involved (eg: phase movement).
2309 #
2309 #
2310 # For now, We gate the feature behind a flag since this likely comes
2310 # For now, We gate the feature behind a flag since this likely comes
2311 # with performance impacts. The current code run more often than needed
2311 # with performance impacts. The current code run more often than needed
2312 # and do not use caches as much as it could. The current focus is on
2312 # and do not use caches as much as it could. The current focus is on
2313 # the behavior of the feature so we disable it by default. The flag
2313 # the behavior of the feature so we disable it by default. The flag
2314 # will be removed when we are happy with the performance impact.
2314 # will be removed when we are happy with the performance impact.
2315 #
2315 #
2316 # Once this feature is no longer experimental move the following
2316 # Once this feature is no longer experimental move the following
2317 # documentation to the appropriate help section:
2317 # documentation to the appropriate help section:
2318 #
2318 #
2319 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2319 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2320 # tags (new or changed or deleted tags). In addition the details of
2320 # tags (new or changed or deleted tags). In addition the details of
2321 # these changes are made available in a file at:
2321 # these changes are made available in a file at:
2322 # ``REPOROOT/.hg/changes/tags.changes``.
2322 # ``REPOROOT/.hg/changes/tags.changes``.
2323 # Make sure you check for HG_TAG_MOVED before reading that file as it
2323 # Make sure you check for HG_TAG_MOVED before reading that file as it
2324 # might exist from a previous transaction even if no tag were touched
2324 # might exist from a previous transaction even if no tag were touched
2325 # in this one. Changes are recorded in a line base format::
2325 # in this one. Changes are recorded in a line base format::
2326 #
2326 #
2327 # <action> <hex-node> <tag-name>\n
2327 # <action> <hex-node> <tag-name>\n
2328 #
2328 #
2329 # Actions are defined as follow:
2329 # Actions are defined as follow:
2330 # "-R": tag is removed,
2330 # "-R": tag is removed,
2331 # "+A": tag is added,
2331 # "+A": tag is added,
2332 # "-M": tag is moved (old value),
2332 # "-M": tag is moved (old value),
2333 # "+M": tag is moved (new value),
2333 # "+M": tag is moved (new value),
2334 tracktags = lambda x: None
2334 tracktags = lambda x: None
2335 # experimental config: experimental.hook-track-tags
2335 # experimental config: experimental.hook-track-tags
2336 shouldtracktags = self.ui.configbool(
2336 shouldtracktags = self.ui.configbool(
2337 b'experimental', b'hook-track-tags'
2337 b'experimental', b'hook-track-tags'
2338 )
2338 )
2339 if desc != b'strip' and shouldtracktags:
2339 if desc != b'strip' and shouldtracktags:
2340 oldheads = self.changelog.headrevs()
2340 oldheads = self.changelog.headrevs()
2341
2341
2342 def tracktags(tr2):
2342 def tracktags(tr2):
2343 repo = reporef()
2343 repo = reporef()
2344 assert repo is not None # help pytype
2344 assert repo is not None # help pytype
2345 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2345 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2346 newheads = repo.changelog.headrevs()
2346 newheads = repo.changelog.headrevs()
2347 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2347 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2348 # notes: we compare lists here.
2348 # notes: we compare lists here.
2349 # As we do it only once buiding set would not be cheaper
2349 # As we do it only once buiding set would not be cheaper
2350 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2350 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2351 if changes:
2351 if changes:
2352 tr2.hookargs[b'tag_moved'] = b'1'
2352 tr2.hookargs[b'tag_moved'] = b'1'
2353 with repo.vfs(
2353 with repo.vfs(
2354 b'changes/tags.changes', b'w', atomictemp=True
2354 b'changes/tags.changes', b'w', atomictemp=True
2355 ) as changesfile:
2355 ) as changesfile:
2356 # note: we do not register the file to the transaction
2356 # note: we do not register the file to the transaction
2357 # because we needs it to still exist on the transaction
2357 # because we needs it to still exist on the transaction
2358 # is close (for txnclose hooks)
2358 # is close (for txnclose hooks)
2359 tagsmod.writediff(changesfile, changes)
2359 tagsmod.writediff(changesfile, changes)
2360
2360
2361 def validate(tr2):
2361 def validate(tr2):
2362 """will run pre-closing hooks"""
2362 """will run pre-closing hooks"""
2363 # XXX the transaction API is a bit lacking here so we take a hacky
2363 # XXX the transaction API is a bit lacking here so we take a hacky
2364 # path for now
2364 # path for now
2365 #
2365 #
2366 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
2366 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
2367 # dict is copied before these run. In addition we needs the data
2367 # dict is copied before these run. In addition we needs the data
2368 # available to in memory hooks too.
2368 # available to in memory hooks too.
2369 #
2369 #
2370 # Moreover, we also need to make sure this runs before txnclose
2370 # Moreover, we also need to make sure this runs before txnclose
2371 # hooks and there is no "pending" mechanism that would execute
2371 # hooks and there is no "pending" mechanism that would execute
2372 # logic only if hooks are about to run.
2372 # logic only if hooks are about to run.
2373 #
2373 #
2374 # Fixing this limitation of the transaction is also needed to track
2374 # Fixing this limitation of the transaction is also needed to track
2375 # other families of changes (bookmarks, phases, obsolescence).
2375 # other families of changes (bookmarks, phases, obsolescence).
2376 #
2376 #
2377 # This will have to be fixed before we remove the experimental
2377 # This will have to be fixed before we remove the experimental
2378 # gating.
2378 # gating.
2379 tracktags(tr2)
2379 tracktags(tr2)
2380 repo = reporef()
2380 repo = reporef()
2381 assert repo is not None # help pytype
2381 assert repo is not None # help pytype
2382
2382
2383 singleheadopt = (b'experimental', b'single-head-per-branch')
2383 singleheadopt = (b'experimental', b'single-head-per-branch')
2384 singlehead = repo.ui.configbool(*singleheadopt)
2384 singlehead = repo.ui.configbool(*singleheadopt)
2385 if singlehead:
2385 if singlehead:
2386 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2386 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2387 accountclosed = singleheadsub.get(
2387 accountclosed = singleheadsub.get(
2388 b"account-closed-heads", False
2388 b"account-closed-heads", False
2389 )
2389 )
2390 if singleheadsub.get(b"public-changes-only", False):
2390 if singleheadsub.get(b"public-changes-only", False):
2391 filtername = b"immutable"
2391 filtername = b"immutable"
2392 else:
2392 else:
2393 filtername = b"visible"
2393 filtername = b"visible"
2394 scmutil.enforcesinglehead(
2394 scmutil.enforcesinglehead(
2395 repo, tr2, desc, accountclosed, filtername
2395 repo, tr2, desc, accountclosed, filtername
2396 )
2396 )
2397 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2397 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2398 for name, (old, new) in sorted(
2398 for name, (old, new) in sorted(
2399 tr.changes[b'bookmarks'].items()
2399 tr.changes[b'bookmarks'].items()
2400 ):
2400 ):
2401 args = tr.hookargs.copy()
2401 args = tr.hookargs.copy()
2402 args.update(bookmarks.preparehookargs(name, old, new))
2402 args.update(bookmarks.preparehookargs(name, old, new))
2403 repo.hook(
2403 repo.hook(
2404 b'pretxnclose-bookmark',
2404 b'pretxnclose-bookmark',
2405 throw=True,
2405 throw=True,
2406 **pycompat.strkwargs(args)
2406 **pycompat.strkwargs(args)
2407 )
2407 )
2408 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2408 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2409 cl = repo.unfiltered().changelog
2409 cl = repo.unfiltered().changelog
2410 for revs, (old, new) in tr.changes[b'phases']:
2410 for revs, (old, new) in tr.changes[b'phases']:
2411 for rev in revs:
2411 for rev in revs:
2412 args = tr.hookargs.copy()
2412 args = tr.hookargs.copy()
2413 node = hex(cl.node(rev))
2413 node = hex(cl.node(rev))
2414 args.update(phases.preparehookargs(node, old, new))
2414 args.update(phases.preparehookargs(node, old, new))
2415 repo.hook(
2415 repo.hook(
2416 b'pretxnclose-phase',
2416 b'pretxnclose-phase',
2417 throw=True,
2417 throw=True,
2418 **pycompat.strkwargs(args)
2418 **pycompat.strkwargs(args)
2419 )
2419 )
2420
2420
2421 repo.hook(
2421 repo.hook(
2422 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2422 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2423 )
2423 )
2424
2424
2425 def releasefn(tr, success):
2425 def releasefn(tr, success):
2426 repo = reporef()
2426 repo = reporef()
2427 if repo is None:
2427 if repo is None:
2428 # If the repo has been GC'd (and this release function is being
2428 # If the repo has been GC'd (and this release function is being
2429 # called from transaction.__del__), there's not much we can do,
2429 # called from transaction.__del__), there's not much we can do,
2430 # so just leave the unfinished transaction there and let the
2430 # so just leave the unfinished transaction there and let the
2431 # user run `hg recover`.
2431 # user run `hg recover`.
2432 return
2432 return
2433 if success:
2433 if success:
2434 # this should be explicitly invoked here, because
2434 # this should be explicitly invoked here, because
2435 # in-memory changes aren't written out at closing
2435 # in-memory changes aren't written out at closing
2436 # transaction, if tr.addfilegenerator (via
2436 # transaction, if tr.addfilegenerator (via
2437 # dirstate.write or so) isn't invoked while
2437 # dirstate.write or so) isn't invoked while
2438 # transaction running
2438 # transaction running
2439 repo.dirstate.write(None)
2439 repo.dirstate.write(None)
2440 else:
2440 else:
2441 # discard all changes (including ones already written
2441 # discard all changes (including ones already written
2442 # out) in this transaction
2442 # out) in this transaction
2443 narrowspec.restorebackup(self, b'journal.narrowspec')
2443 narrowspec.restorebackup(self, b'journal.narrowspec')
2444 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2444 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2445 repo.dirstate.restorebackup(None, b'journal.dirstate')
2445 repo.dirstate.restorebackup(None, b'journal.dirstate')
2446
2446
2447 repo.invalidate(clearfilecache=True)
2447 repo.invalidate(clearfilecache=True)
2448
2448
2449 tr = transaction.transaction(
2449 tr = transaction.transaction(
2450 rp,
2450 rp,
2451 self.svfs,
2451 self.svfs,
2452 vfsmap,
2452 vfsmap,
2453 b"journal",
2453 b"journal",
2454 b"undo",
2454 b"undo",
2455 aftertrans(renames),
2455 aftertrans(renames),
2456 self.store.createmode,
2456 self.store.createmode,
2457 validator=validate,
2457 validator=validate,
2458 releasefn=releasefn,
2458 releasefn=releasefn,
2459 checkambigfiles=_cachedfiles,
2459 checkambigfiles=_cachedfiles,
2460 name=desc,
2460 name=desc,
2461 )
2461 )
2462 tr.changes[b'origrepolen'] = len(self)
2462 tr.changes[b'origrepolen'] = len(self)
2463 tr.changes[b'obsmarkers'] = set()
2463 tr.changes[b'obsmarkers'] = set()
2464 tr.changes[b'phases'] = []
2464 tr.changes[b'phases'] = []
2465 tr.changes[b'bookmarks'] = {}
2465 tr.changes[b'bookmarks'] = {}
2466
2466
2467 tr.hookargs[b'txnid'] = txnid
2467 tr.hookargs[b'txnid'] = txnid
2468 tr.hookargs[b'txnname'] = desc
2468 tr.hookargs[b'txnname'] = desc
2469 tr.hookargs[b'changes'] = tr.changes
2469 tr.hookargs[b'changes'] = tr.changes
2470 # note: writing the fncache only during finalize mean that the file is
2470 # note: writing the fncache only during finalize mean that the file is
2471 # outdated when running hooks. As fncache is used for streaming clone,
2471 # outdated when running hooks. As fncache is used for streaming clone,
2472 # this is not expected to break anything that happen during the hooks.
2472 # this is not expected to break anything that happen during the hooks.
2473 tr.addfinalize(b'flush-fncache', self.store.write)
2473 tr.addfinalize(b'flush-fncache', self.store.write)
2474
2474
2475 def txnclosehook(tr2):
2475 def txnclosehook(tr2):
2476 """To be run if transaction is successful, will schedule a hook run"""
2476 """To be run if transaction is successful, will schedule a hook run"""
2477 # Don't reference tr2 in hook() so we don't hold a reference.
2477 # Don't reference tr2 in hook() so we don't hold a reference.
2478 # This reduces memory consumption when there are multiple
2478 # This reduces memory consumption when there are multiple
2479 # transactions per lock. This can likely go away if issue5045
2479 # transactions per lock. This can likely go away if issue5045
2480 # fixes the function accumulation.
2480 # fixes the function accumulation.
2481 hookargs = tr2.hookargs
2481 hookargs = tr2.hookargs
2482
2482
2483 def hookfunc(unused_success):
2483 def hookfunc(unused_success):
2484 repo = reporef()
2484 repo = reporef()
2485 assert repo is not None # help pytype
2485 assert repo is not None # help pytype
2486
2486
2487 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2487 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2488 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2488 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2489 for name, (old, new) in bmchanges:
2489 for name, (old, new) in bmchanges:
2490 args = tr.hookargs.copy()
2490 args = tr.hookargs.copy()
2491 args.update(bookmarks.preparehookargs(name, old, new))
2491 args.update(bookmarks.preparehookargs(name, old, new))
2492 repo.hook(
2492 repo.hook(
2493 b'txnclose-bookmark',
2493 b'txnclose-bookmark',
2494 throw=False,
2494 throw=False,
2495 **pycompat.strkwargs(args)
2495 **pycompat.strkwargs(args)
2496 )
2496 )
2497
2497
2498 if hook.hashook(repo.ui, b'txnclose-phase'):
2498 if hook.hashook(repo.ui, b'txnclose-phase'):
2499 cl = repo.unfiltered().changelog
2499 cl = repo.unfiltered().changelog
2500 phasemv = sorted(
2500 phasemv = sorted(
2501 tr.changes[b'phases'], key=lambda r: r[0][0]
2501 tr.changes[b'phases'], key=lambda r: r[0][0]
2502 )
2502 )
2503 for revs, (old, new) in phasemv:
2503 for revs, (old, new) in phasemv:
2504 for rev in revs:
2504 for rev in revs:
2505 args = tr.hookargs.copy()
2505 args = tr.hookargs.copy()
2506 node = hex(cl.node(rev))
2506 node = hex(cl.node(rev))
2507 args.update(phases.preparehookargs(node, old, new))
2507 args.update(phases.preparehookargs(node, old, new))
2508 repo.hook(
2508 repo.hook(
2509 b'txnclose-phase',
2509 b'txnclose-phase',
2510 throw=False,
2510 throw=False,
2511 **pycompat.strkwargs(args)
2511 **pycompat.strkwargs(args)
2512 )
2512 )
2513
2513
2514 repo.hook(
2514 repo.hook(
2515 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2515 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2516 )
2516 )
2517
2517
2518 repo = reporef()
2518 repo = reporef()
2519 assert repo is not None # help pytype
2519 assert repo is not None # help pytype
2520 repo._afterlock(hookfunc)
2520 repo._afterlock(hookfunc)
2521
2521
2522 tr.addfinalize(b'txnclose-hook', txnclosehook)
2522 tr.addfinalize(b'txnclose-hook', txnclosehook)
2523 # Include a leading "-" to make it happen before the transaction summary
2523 # Include a leading "-" to make it happen before the transaction summary
2524 # reports registered via scmutil.registersummarycallback() whose names
2524 # reports registered via scmutil.registersummarycallback() whose names
2525 # are 00-txnreport etc. That way, the caches will be warm when the
2525 # are 00-txnreport etc. That way, the caches will be warm when the
2526 # callbacks run.
2526 # callbacks run.
2527 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2527 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2528
2528
2529 def txnaborthook(tr2):
2529 def txnaborthook(tr2):
2530 """To be run if transaction is aborted"""
2530 """To be run if transaction is aborted"""
2531 repo = reporef()
2531 repo = reporef()
2532 assert repo is not None # help pytype
2532 assert repo is not None # help pytype
2533 repo.hook(
2533 repo.hook(
2534 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2534 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2535 )
2535 )
2536
2536
2537 tr.addabort(b'txnabort-hook', txnaborthook)
2537 tr.addabort(b'txnabort-hook', txnaborthook)
2538 # avoid eager cache invalidation. in-memory data should be identical
2538 # avoid eager cache invalidation. in-memory data should be identical
2539 # to stored data if transaction has no error.
2539 # to stored data if transaction has no error.
2540 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2540 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2541 self._transref = weakref.ref(tr)
2541 self._transref = weakref.ref(tr)
2542 scmutil.registersummarycallback(self, tr, desc)
2542 scmutil.registersummarycallback(self, tr, desc)
2543 return tr
2543 return tr
2544
2544
2545 def _journalfiles(self):
2545 def _journalfiles(self):
2546 return (
2546 return (
2547 (self.svfs, b'journal'),
2547 (self.svfs, b'journal'),
2548 (self.svfs, b'journal.narrowspec'),
2548 (self.svfs, b'journal.narrowspec'),
2549 (self.vfs, b'journal.narrowspec.dirstate'),
2549 (self.vfs, b'journal.narrowspec.dirstate'),
2550 (self.vfs, b'journal.dirstate'),
2550 (self.vfs, b'journal.dirstate'),
2551 (self.vfs, b'journal.branch'),
2551 (self.vfs, b'journal.branch'),
2552 (self.vfs, b'journal.desc'),
2552 (self.vfs, b'journal.desc'),
2553 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2553 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2554 (self.svfs, b'journal.phaseroots'),
2554 (self.svfs, b'journal.phaseroots'),
2555 )
2555 )
2556
2556
2557 def undofiles(self):
2557 def undofiles(self):
2558 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2558 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2559
2559
    @unfilteredmethod
    def _writejournal(self, desc):
        """Snapshot non-store state a transaction may touch into journal files.

        The backups written here are what ``releasefn`` (on abort) and
        ``_rollback`` later restore; the file names must stay in sync with
        ``_journalfiles``.
        """
        self.dirstate.savebackup(None, b'journal.dirstate')
        narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, b'journal.narrowspec')
        self.vfs.write(
            b"journal.branch", encoding.fromlocal(self.dirstate.branch())
        )
        # record the pre-transaction repo length plus the description so a
        # later rollback can report what is being undone
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        bookmarksvfs.write(
            b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
        )
        self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2574
2574
2575 def recover(self):
2575 def recover(self):
2576 with self.lock():
2576 with self.lock():
2577 if self.svfs.exists(b"journal"):
2577 if self.svfs.exists(b"journal"):
2578 self.ui.status(_(b"rolling back interrupted transaction\n"))
2578 self.ui.status(_(b"rolling back interrupted transaction\n"))
2579 vfsmap = {
2579 vfsmap = {
2580 b'': self.svfs,
2580 b'': self.svfs,
2581 b'plain': self.vfs,
2581 b'plain': self.vfs,
2582 }
2582 }
2583 transaction.rollback(
2583 transaction.rollback(
2584 self.svfs,
2584 self.svfs,
2585 vfsmap,
2585 vfsmap,
2586 b"journal",
2586 b"journal",
2587 self.ui.warn,
2587 self.ui.warn,
2588 checkambigfiles=_cachedfiles,
2588 checkambigfiles=_cachedfiles,
2589 )
2589 )
2590 self.invalidate()
2590 self.invalidate()
2591 return True
2591 return True
2592 else:
2592 else:
2593 self.ui.warn(_(b"no interrupted transaction available\n"))
2593 self.ui.warn(_(b"no interrupted transaction available\n"))
2594 return False
2594 return False
2595
2595
    def rollback(self, dryrun=False, force=False):
        """Undo the last transaction recorded in the ``undo.*`` files.

        Returns 0 on success (delegating to ``_rollback``) and 1 when there
        is no rollback information available.
        """
        wlock = lock = dsguard = None
        try:
            # always take wlock before lock to avoid the documented
            # dead-lock hazard
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists(b"undo"):
                # guard the dirstate while _rollback rewrites it
                dsguard = dirstateguard.dirstateguard(self, b'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_(b"no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)
2610
2610
    @unfilteredmethod  # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        """Perform the actual rollback of the last transaction.

        Callers (``rollback``) hold wlock, lock and a dirstateguard.
        Returns 0; may raise error.Abort when rolling back a commit while
        not checked out and ``force`` is not set.
        """
        ui = self.ui
        try:
            # undo.desc holds "<old repo length>\n<desc>[\n<detail>]"
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
        except IOError:
            # undo.desc is missing or unreadable; proceed without details
            msg = _(b'rolling back unknown transaction\n')
            desc = None

        # rolling back a commit that is not the working-copy parent can
        # silently lose data, so require --force in that case
        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        # capture the dirstate parents before the store is rewritten so we
        # can tell below whether they survived the rollback
        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {b'plain': self.vfs, b'': self.svfs}
        transaction.rollback(
            self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
        )
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        if bookmarksvfs.exists(b'undo.bookmarks'):
            bookmarksvfs.rename(
                b'undo.bookmarks', b'bookmarks', checkambig=True
            )
        if self.svfs.exists(b'undo.phaseroots'):
            self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
        self.invalidate()

        has_node = self.changelog.index.has_node
        parentgone = any(not has_node(p) for p in parents)
        if parentgone:
            # the working-copy parents were stripped: restore the backed-up
            # working-copy state as well
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, b'undo.narrowspec')
            narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, b'undo.dirstate')
            try:
                branch = self.vfs.read(b'undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(
                    _(
                        b'named branch could not be reset: '
                        b'current branch is still \'%s\'\n'
                    )
                    % self.dirstate.branch()
                )

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
            mergestatemod.mergestate.clean(self)

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
2703
2703
2704 def _buildcacheupdater(self, newtransaction):
2704 def _buildcacheupdater(self, newtransaction):
2705 """called during transaction to build the callback updating cache
2705 """called during transaction to build the callback updating cache
2706
2706
2707 Lives on the repository to help extension who might want to augment
2707 Lives on the repository to help extension who might want to augment
2708 this logic. For this purpose, the created transaction is passed to the
2708 this logic. For this purpose, the created transaction is passed to the
2709 method.
2709 method.
2710 """
2710 """
2711 # we must avoid cyclic reference between repo and transaction.
2711 # we must avoid cyclic reference between repo and transaction.
2712 reporef = weakref.ref(self)
2712 reporef = weakref.ref(self)
2713
2713
2714 def updater(tr):
2714 def updater(tr):
2715 repo = reporef()
2715 repo = reporef()
2716 assert repo is not None # help pytype
2716 assert repo is not None # help pytype
2717 repo.updatecaches(tr)
2717 repo.updatecaches(tr)
2718
2718
2719 return updater
2719 return updater
2720
2720
    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed. The transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        # only rewarm the branch caches when new revisions were added (or
        # when called outside any transaction)
        if tr is None or tr.changes[b'origrepolen'] < len(self):
            # accessing the 'served' branchmap should refresh all the others,
            self.ui.debug(b'updating the branch cache\n')
            self.filtered(b'served').branchmap()
            self.filtered(b'served.hidden').branchmap()

        if full:
            unfi = self.unfiltered()

            self.changelog.update_caches(transaction=tr)
            self.manifestlog.update_caches(transaction=tr)

            # warm the rev-branch cache for every revision
            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

            # accessing fnode cache warms the cache
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
            # accessing tags warm the cache
            self.tags()
            self.filtered(b'served').tags()

            # The `full` arg is documented as updating even the lazily-loaded
            # caches immediately, so we're forcing a write to cause these caches
            # to be warmed up even if they haven't explicitly been requested
            # yet (if they've never been used by hg, they won't ever have been
            # written, even if they're a subset of another kind of cache that
            # *has* been used).
            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                filtered.branchmap().write(filtered)
2773
2773
2774 def invalidatecaches(self):
2774 def invalidatecaches(self):
2775
2775
2776 if '_tagscache' in vars(self):
2776 if '_tagscache' in vars(self):
2777 # can't use delattr on proxy
2777 # can't use delattr on proxy
2778 del self.__dict__['_tagscache']
2778 del self.__dict__['_tagscache']
2779
2779
2780 self._branchcaches.clear()
2780 self._branchcaches.clear()
2781 self.invalidatevolatilesets()
2781 self.invalidatevolatilesets()
2782 self._sparsesignaturecache.clear()
2782 self._sparsesignaturecache.clear()
2783
2783
    def invalidatevolatilesets(self):
        """Drop caches that depend on repository filtering/obsolescence.

        The filtered-rev cache, obsolescence caches and the quick-access
        changeid cache are all recomputed lazily on next use.
        """
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        self._quick_access_changeid_invalidate()
2788
2788
    def invalidatedirstate(self):
        """Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different to dirstate.invalidate() that it doesn't always
        rereads the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state)."""
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    # cached attribute was never instantiated; nothing to drop
                    pass
            # the dirstate filecache entry lives on the unfiltered repo
            delattr(self.unfiltered(), 'dirstate')
2805
2805
    def invalidate(self, clearfilecache=False):
        """Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).

        When ``clearfilecache`` is set, the filecache entries themselves are
        dropped as well, forcing a full reload instead of a stat check.
        """
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == b'dirstate':
                continue
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog._delayed
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                # the cached property was never instantiated on this repo
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()
2841
2841
    def invalidateall(self):
        """Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes."""
        # extension should hook this to invalidate its caches
        self.invalidate()
        # dirstate is deliberately handled separately from invalidate()
        self.invalidatedirstate()
2848
2848
2849 @unfilteredmethod
2849 @unfilteredmethod
2850 def _refreshfilecachestats(self, tr):
2850 def _refreshfilecachestats(self, tr):
2851 """Reload stats of cached files so that they are flagged as valid"""
2851 """Reload stats of cached files so that they are flagged as valid"""
2852 for k, ce in self._filecache.items():
2852 for k, ce in self._filecache.items():
2853 k = pycompat.sysstr(k)
2853 k = pycompat.sysstr(k)
2854 if k == 'dirstate' or k not in self.__dict__:
2854 if k == 'dirstate' or k not in self.__dict__:
2855 continue
2855 continue
2856 ce.refresh()
2856 ce.refresh()
2857
2857
2858 def _lock(
2858 def _lock(
2859 self,
2859 self,
2860 vfs,
2860 vfs,
2861 lockname,
2861 lockname,
2862 wait,
2862 wait,
2863 releasefn,
2863 releasefn,
2864 acquirefn,
2864 acquirefn,
2865 desc,
2865 desc,
2866 ):
2866 ):
2867 timeout = 0
2867 timeout = 0
2868 warntimeout = 0
2868 warntimeout = 0
2869 if wait:
2869 if wait:
2870 timeout = self.ui.configint(b"ui", b"timeout")
2870 timeout = self.ui.configint(b"ui", b"timeout")
2871 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2871 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2872 # internal config: ui.signal-safe-lock
2872 # internal config: ui.signal-safe-lock
2873 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2873 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2874
2874
2875 l = lockmod.trylock(
2875 l = lockmod.trylock(
2876 self.ui,
2876 self.ui,
2877 vfs,
2877 vfs,
2878 lockname,
2878 lockname,
2879 timeout,
2879 timeout,
2880 warntimeout,
2880 warntimeout,
2881 releasefn=releasefn,
2881 releasefn=releasefn,
2882 acquirefn=acquirefn,
2882 acquirefn=acquirefn,
2883 desc=desc,
2883 desc=desc,
2884 signalsafe=signalsafe,
2884 signalsafe=signalsafe,
2885 )
2885 )
2886 return l
2886 return l
2887
2887
2888 def _afterlock(self, callback):
2888 def _afterlock(self, callback):
2889 """add a callback to be run when the repository is fully unlocked
2889 """add a callback to be run when the repository is fully unlocked
2890
2890
2891 The callback will be executed when the outermost lock is released
2891 The callback will be executed when the outermost lock is released
2892 (with wlock being higher level than 'lock')."""
2892 (with wlock being higher level than 'lock')."""
2893 for ref in (self._wlockref, self._lockref):
2893 for ref in (self._wlockref, self._lockref):
2894 l = ref and ref()
2894 l = ref and ref()
2895 if l and l.held:
2895 if l and l.held:
2896 l.postrelease.append(callback)
2896 l.postrelease.append(callback)
2897 break
2897 break
2898 else: # no lock have been found.
2898 else: # no lock have been found.
2899 callback(True)
2899 callback(True)
2900
2900
    def lock(self, wait=True):
        """Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)

        If both 'lock' and 'wlock' must be acquired, ensure you always acquires
        'wlock' first to avoid a dead-lock hazard."""
        l = self._currentlock(self._lockref)
        if l is not None:
            # lock is already held: bump its nesting count and reuse it
            l.lock()
            return l

        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        # only keep a weak reference so the lock is released when the last
        # strong reference goes away
        self._lockref = weakref.ref(l)
        return l
2923
2923
2924 def wlock(self, wait=True):
2924 def wlock(self, wait=True):
2925 """Lock the non-store parts of the repository (everything under
2925 """Lock the non-store parts of the repository (everything under
2926 .hg except .hg/store) and return a weak reference to the lock.
2926 .hg except .hg/store) and return a weak reference to the lock.
2927
2927
2928 Use this before modifying files in .hg.
2928 Use this before modifying files in .hg.
2929
2929
2930 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
2930 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
2931 'wlock' first to avoid a dead-lock hazard."""
2931 'wlock' first to avoid a dead-lock hazard."""
2932 l = self._wlockref() if self._wlockref else None
2932 l = self._wlockref() if self._wlockref else None
2933 if l is not None and l.held:
2933 if l is not None and l.held:
2934 l.lock()
2934 l.lock()
2935 return l
2935 return l
2936
2936
2937 # We do not need to check for non-waiting lock acquisition. Such
2937 # We do not need to check for non-waiting lock acquisition. Such
2938 # acquisition would not cause dead-lock as they would just fail.
2938 # acquisition would not cause dead-lock as they would just fail.
2939 if wait and (
2939 if wait and (
2940 self.ui.configbool(b'devel', b'all-warnings')
2940 self.ui.configbool(b'devel', b'all-warnings')
2941 or self.ui.configbool(b'devel', b'check-locks')
2941 or self.ui.configbool(b'devel', b'check-locks')
2942 ):
2942 ):
2943 if self._currentlock(self._lockref) is not None:
2943 if self._currentlock(self._lockref) is not None:
2944 self.ui.develwarn(b'"wlock" acquired after "lock"')
2944 self.ui.develwarn(b'"wlock" acquired after "lock"')
2945
2945
2946 def unlock():
2946 def unlock():
2947 if self.dirstate.pendingparentchange():
2947 if self.dirstate.pendingparentchange():
2948 self.dirstate.invalidate()
2948 self.dirstate.invalidate()
2949 else:
2949 else:
2950 self.dirstate.write(None)
2950 self.dirstate.write(None)
2951
2951
2952 self._filecache[b'dirstate'].refresh()
2952 self._filecache[b'dirstate'].refresh()
2953
2953
2954 l = self._lock(
2954 l = self._lock(
2955 self.vfs,
2955 self.vfs,
2956 b"wlock",
2956 b"wlock",
2957 wait,
2957 wait,
2958 unlock,
2958 unlock,
2959 self.invalidatedirstate,
2959 self.invalidatedirstate,
2960 _(b'working directory of %s') % self.origroot,
2960 _(b'working directory of %s') % self.origroot,
2961 )
2961 )
2962 self._wlockref = weakref.ref(l)
2962 self._wlockref = weakref.ref(l)
2963 return l
2963 return l
2964
2964
2965 def _currentlock(self, lockref):
2965 def _currentlock(self, lockref):
2966 """Returns the lock if it's held, or None if it's not."""
2966 """Returns the lock if it's held, or None if it's not."""
2967 if lockref is None:
2967 if lockref is None:
2968 return None
2968 return None
2969 l = lockref()
2969 l = lockref()
2970 if l is None or not l.held:
2970 if l is None or not l.held:
2971 return None
2971 return None
2972 return l
2972 return l
2973
2973
2974 def currentwlock(self):
2974 def currentwlock(self):
2975 """Returns the wlock if it's held, or None if it's not."""
2975 """Returns the wlock if it's held, or None if it's not."""
2976 return self._currentlock(self._wlockref)
2976 return self._currentlock(self._wlockref)
2977
2977
2978 def checkcommitpatterns(self, wctx, match, status, fail):
2978 def checkcommitpatterns(self, wctx, match, status, fail):
2979 """check for commit arguments that aren't committable"""
2979 """check for commit arguments that aren't committable"""
2980 if match.isexact() or match.prefix():
2980 if match.isexact() or match.prefix():
2981 matched = set(status.modified + status.added + status.removed)
2981 matched = set(status.modified + status.added + status.removed)
2982
2982
2983 for f in match.files():
2983 for f in match.files():
2984 f = self.dirstate.normalize(f)
2984 f = self.dirstate.normalize(f)
2985 if f == b'.' or f in matched or f in wctx.substate:
2985 if f == b'.' or f in matched or f in wctx.substate:
2986 continue
2986 continue
2987 if f in status.deleted:
2987 if f in status.deleted:
2988 fail(f, _(b'file not found!'))
2988 fail(f, _(b'file not found!'))
2989 # Is it a directory that exists or used to exist?
2989 # Is it a directory that exists or used to exist?
2990 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
2990 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
2991 d = f + b'/'
2991 d = f + b'/'
2992 for mf in matched:
2992 for mf in matched:
2993 if mf.startswith(d):
2993 if mf.startswith(d):
2994 break
2994 break
2995 else:
2995 else:
2996 fail(f, _(b"no match under directory!"))
2996 fail(f, _(b"no match under directory!"))
2997 elif f not in self.dirstate:
2997 elif f not in self.dirstate:
2998 fail(f, _(b"file not tracked!"))
2998 fail(f, _(b"file not tracked!"))
2999
2999
3000 @unfilteredmethod
3000 @unfilteredmethod
3001 def commit(
3001 def commit(
3002 self,
3002 self,
3003 text=b"",
3003 text=b"",
3004 user=None,
3004 user=None,
3005 date=None,
3005 date=None,
3006 match=None,
3006 match=None,
3007 force=False,
3007 force=False,
3008 editor=None,
3008 editor=None,
3009 extra=None,
3009 extra=None,
3010 ):
3010 ):
3011 """Add a new revision to current repository.
3011 """Add a new revision to current repository.
3012
3012
3013 Revision information is gathered from the working directory,
3013 Revision information is gathered from the working directory,
3014 match can be used to filter the committed files. If editor is
3014 match can be used to filter the committed files. If editor is
3015 supplied, it is called to get a commit message.
3015 supplied, it is called to get a commit message.
3016 """
3016 """
3017 if extra is None:
3017 if extra is None:
3018 extra = {}
3018 extra = {}
3019
3019
3020 def fail(f, msg):
3020 def fail(f, msg):
3021 raise error.InputError(b'%s: %s' % (f, msg))
3021 raise error.InputError(b'%s: %s' % (f, msg))
3022
3022
3023 if not match:
3023 if not match:
3024 match = matchmod.always()
3024 match = matchmod.always()
3025
3025
3026 if not force:
3026 if not force:
3027 match.bad = fail
3027 match.bad = fail
3028
3028
3029 # lock() for recent changelog (see issue4368)
3029 # lock() for recent changelog (see issue4368)
3030 with self.wlock(), self.lock():
3030 with self.wlock(), self.lock():
3031 wctx = self[None]
3031 wctx = self[None]
3032 merge = len(wctx.parents()) > 1
3032 merge = len(wctx.parents()) > 1
3033
3033
3034 if not force and merge and not match.always():
3034 if not force and merge and not match.always():
3035 raise error.Abort(
3035 raise error.Abort(
3036 _(
3036 _(
3037 b'cannot partially commit a merge '
3037 b'cannot partially commit a merge '
3038 b'(do not specify files or patterns)'
3038 b'(do not specify files or patterns)'
3039 )
3039 )
3040 )
3040 )
3041
3041
3042 status = self.status(match=match, clean=force)
3042 status = self.status(match=match, clean=force)
3043 if force:
3043 if force:
3044 status.modified.extend(
3044 status.modified.extend(
3045 status.clean
3045 status.clean
3046 ) # mq may commit clean files
3046 ) # mq may commit clean files
3047
3047
3048 # check subrepos
3048 # check subrepos
3049 subs, commitsubs, newstate = subrepoutil.precommit(
3049 subs, commitsubs, newstate = subrepoutil.precommit(
3050 self.ui, wctx, status, match, force=force
3050 self.ui, wctx, status, match, force=force
3051 )
3051 )
3052
3052
3053 # make sure all explicit patterns are matched
3053 # make sure all explicit patterns are matched
3054 if not force:
3054 if not force:
3055 self.checkcommitpatterns(wctx, match, status, fail)
3055 self.checkcommitpatterns(wctx, match, status, fail)
3056
3056
3057 cctx = context.workingcommitctx(
3057 cctx = context.workingcommitctx(
3058 self, status, text, user, date, extra
3058 self, status, text, user, date, extra
3059 )
3059 )
3060
3060
3061 ms = mergestatemod.mergestate.read(self)
3061 ms = mergestatemod.mergestate.read(self)
3062 mergeutil.checkunresolved(ms)
3062 mergeutil.checkunresolved(ms)
3063
3063
3064 # internal config: ui.allowemptycommit
3064 # internal config: ui.allowemptycommit
3065 if cctx.isempty() and not self.ui.configbool(
3065 if cctx.isempty() and not self.ui.configbool(
3066 b'ui', b'allowemptycommit'
3066 b'ui', b'allowemptycommit'
3067 ):
3067 ):
3068 self.ui.debug(b'nothing to commit, clearing merge state\n')
3068 self.ui.debug(b'nothing to commit, clearing merge state\n')
3069 ms.reset()
3069 ms.reset()
3070 return None
3070 return None
3071
3071
3072 if merge and cctx.deleted():
3072 if merge and cctx.deleted():
3073 raise error.Abort(_(b"cannot commit merge with missing files"))
3073 raise error.Abort(_(b"cannot commit merge with missing files"))
3074
3074
3075 if editor:
3075 if editor:
3076 cctx._text = editor(self, cctx, subs)
3076 cctx._text = editor(self, cctx, subs)
3077 edited = text != cctx._text
3077 edited = text != cctx._text
3078
3078
3079 # Save commit message in case this transaction gets rolled back
3079 # Save commit message in case this transaction gets rolled back
3080 # (e.g. by a pretxncommit hook). Leave the content alone on
3080 # (e.g. by a pretxncommit hook). Leave the content alone on
3081 # the assumption that the user will use the same editor again.
3081 # the assumption that the user will use the same editor again.
3082 msgfn = self.savecommitmessage(cctx._text)
3082 msgfn = self.savecommitmessage(cctx._text)
3083
3083
3084 # commit subs and write new state
3084 # commit subs and write new state
3085 if subs:
3085 if subs:
3086 uipathfn = scmutil.getuipathfn(self)
3086 uipathfn = scmutil.getuipathfn(self)
3087 for s in sorted(commitsubs):
3087 for s in sorted(commitsubs):
3088 sub = wctx.sub(s)
3088 sub = wctx.sub(s)
3089 self.ui.status(
3089 self.ui.status(
3090 _(b'committing subrepository %s\n')
3090 _(b'committing subrepository %s\n')
3091 % uipathfn(subrepoutil.subrelpath(sub))
3091 % uipathfn(subrepoutil.subrelpath(sub))
3092 )
3092 )
3093 sr = sub.commit(cctx._text, user, date)
3093 sr = sub.commit(cctx._text, user, date)
3094 newstate[s] = (newstate[s][0], sr)
3094 newstate[s] = (newstate[s][0], sr)
3095 subrepoutil.writestate(self, newstate)
3095 subrepoutil.writestate(self, newstate)
3096
3096
3097 p1, p2 = self.dirstate.parents()
3097 p1, p2 = self.dirstate.parents()
3098 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3098 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3099 try:
3099 try:
3100 self.hook(
3100 self.hook(
3101 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3101 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3102 )
3102 )
3103 with self.transaction(b'commit'):
3103 with self.transaction(b'commit'):
3104 ret = self.commitctx(cctx, True)
3104 ret = self.commitctx(cctx, True)
3105 # update bookmarks, dirstate and mergestate
3105 # update bookmarks, dirstate and mergestate
3106 bookmarks.update(self, [p1, p2], ret)
3106 bookmarks.update(self, [p1, p2], ret)
3107 cctx.markcommitted(ret)
3107 cctx.markcommitted(ret)
3108 ms.reset()
3108 ms.reset()
3109 except: # re-raises
3109 except: # re-raises
3110 if edited:
3110 if edited:
3111 self.ui.write(
3111 self.ui.write(
3112 _(b'note: commit message saved in %s\n') % msgfn
3112 _(b'note: commit message saved in %s\n') % msgfn
3113 )
3113 )
3114 self.ui.write(
3114 self.ui.write(
3115 _(
3115 _(
3116 b"note: use 'hg commit --logfile "
3116 b"note: use 'hg commit --logfile "
3117 b".hg/last-message.txt --edit' to reuse it\n"
3117 b".hg/last-message.txt --edit' to reuse it\n"
3118 )
3118 )
3119 )
3119 )
3120 raise
3120 raise
3121
3121
3122 def commithook(unused_success):
3122 def commithook(unused_success):
3123 # hack for command that use a temporary commit (eg: histedit)
3123 # hack for command that use a temporary commit (eg: histedit)
3124 # temporary commit got stripped before hook release
3124 # temporary commit got stripped before hook release
3125 if self.changelog.hasnode(ret):
3125 if self.changelog.hasnode(ret):
3126 self.hook(
3126 self.hook(
3127 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3127 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3128 )
3128 )
3129
3129
3130 self._afterlock(commithook)
3130 self._afterlock(commithook)
3131 return ret
3131 return ret
3132
3132
3133 @unfilteredmethod
3133 @unfilteredmethod
3134 def commitctx(self, ctx, error=False, origctx=None):
3134 def commitctx(self, ctx, error=False, origctx=None):
3135 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3135 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3136
3136
3137 @unfilteredmethod
3137 @unfilteredmethod
3138 def destroying(self):
3138 def destroying(self):
3139 """Inform the repository that nodes are about to be destroyed.
3139 """Inform the repository that nodes are about to be destroyed.
3140 Intended for use by strip and rollback, so there's a common
3140 Intended for use by strip and rollback, so there's a common
3141 place for anything that has to be done before destroying history.
3141 place for anything that has to be done before destroying history.
3142
3142
3143 This is mostly useful for saving state that is in memory and waiting
3143 This is mostly useful for saving state that is in memory and waiting
3144 to be flushed when the current lock is released. Because a call to
3144 to be flushed when the current lock is released. Because a call to
3145 destroyed is imminent, the repo will be invalidated causing those
3145 destroyed is imminent, the repo will be invalidated causing those
3146 changes to stay in memory (waiting for the next unlock), or vanish
3146 changes to stay in memory (waiting for the next unlock), or vanish
3147 completely.
3147 completely.
3148 """
3148 """
3149 # When using the same lock to commit and strip, the phasecache is left
3149 # When using the same lock to commit and strip, the phasecache is left
3150 # dirty after committing. Then when we strip, the repo is invalidated,
3150 # dirty after committing. Then when we strip, the repo is invalidated,
3151 # causing those changes to disappear.
3151 # causing those changes to disappear.
3152 if '_phasecache' in vars(self):
3152 if '_phasecache' in vars(self):
3153 self._phasecache.write()
3153 self._phasecache.write()
3154
3154
3155 @unfilteredmethod
3155 @unfilteredmethod
3156 def destroyed(self):
3156 def destroyed(self):
3157 """Inform the repository that nodes have been destroyed.
3157 """Inform the repository that nodes have been destroyed.
3158 Intended for use by strip and rollback, so there's a common
3158 Intended for use by strip and rollback, so there's a common
3159 place for anything that has to be done after destroying history.
3159 place for anything that has to be done after destroying history.
3160 """
3160 """
3161 # When one tries to:
3161 # When one tries to:
3162 # 1) destroy nodes thus calling this method (e.g. strip)
3162 # 1) destroy nodes thus calling this method (e.g. strip)
3163 # 2) use phasecache somewhere (e.g. commit)
3163 # 2) use phasecache somewhere (e.g. commit)
3164 #
3164 #
3165 # then 2) will fail because the phasecache contains nodes that were
3165 # then 2) will fail because the phasecache contains nodes that were
3166 # removed. We can either remove phasecache from the filecache,
3166 # removed. We can either remove phasecache from the filecache,
3167 # causing it to reload next time it is accessed, or simply filter
3167 # causing it to reload next time it is accessed, or simply filter
3168 # the removed nodes now and write the updated cache.
3168 # the removed nodes now and write the updated cache.
3169 self._phasecache.filterunknown(self)
3169 self._phasecache.filterunknown(self)
3170 self._phasecache.write()
3170 self._phasecache.write()
3171
3171
3172 # refresh all repository caches
3172 # refresh all repository caches
3173 self.updatecaches()
3173 self.updatecaches()
3174
3174
3175 # Ensure the persistent tag cache is updated. Doing it now
3175 # Ensure the persistent tag cache is updated. Doing it now
3176 # means that the tag cache only has to worry about destroyed
3176 # means that the tag cache only has to worry about destroyed
3177 # heads immediately after a strip/rollback. That in turn
3177 # heads immediately after a strip/rollback. That in turn
3178 # guarantees that "cachetip == currenttip" (comparing both rev
3178 # guarantees that "cachetip == currenttip" (comparing both rev
3179 # and node) always means no nodes have been added or destroyed.
3179 # and node) always means no nodes have been added or destroyed.
3180
3180
3181 # XXX this is suboptimal when qrefresh'ing: we strip the current
3181 # XXX this is suboptimal when qrefresh'ing: we strip the current
3182 # head, refresh the tag cache, then immediately add a new head.
3182 # head, refresh the tag cache, then immediately add a new head.
3183 # But I think doing it this way is necessary for the "instant
3183 # But I think doing it this way is necessary for the "instant
3184 # tag cache retrieval" case to work.
3184 # tag cache retrieval" case to work.
3185 self.invalidate()
3185 self.invalidate()
3186
3186
3187 def status(
3187 def status(
3188 self,
3188 self,
3189 node1=b'.',
3189 node1=b'.',
3190 node2=None,
3190 node2=None,
3191 match=None,
3191 match=None,
3192 ignored=False,
3192 ignored=False,
3193 clean=False,
3193 clean=False,
3194 unknown=False,
3194 unknown=False,
3195 listsubrepos=False,
3195 listsubrepos=False,
3196 ):
3196 ):
3197 '''a convenience method that calls node1.status(node2)'''
3197 '''a convenience method that calls node1.status(node2)'''
3198 return self[node1].status(
3198 return self[node1].status(
3199 node2, match, ignored, clean, unknown, listsubrepos
3199 node2, match, ignored, clean, unknown, listsubrepos
3200 )
3200 )
3201
3201
3202 def addpostdsstatus(self, ps):
3202 def addpostdsstatus(self, ps):
3203 """Add a callback to run within the wlock, at the point at which status
3203 """Add a callback to run within the wlock, at the point at which status
3204 fixups happen.
3204 fixups happen.
3205
3205
3206 On status completion, callback(wctx, status) will be called with the
3206 On status completion, callback(wctx, status) will be called with the
3207 wlock held, unless the dirstate has changed from underneath or the wlock
3207 wlock held, unless the dirstate has changed from underneath or the wlock
3208 couldn't be grabbed.
3208 couldn't be grabbed.
3209
3209
3210 Callbacks should not capture and use a cached copy of the dirstate --
3210 Callbacks should not capture and use a cached copy of the dirstate --
3211 it might change in the meanwhile. Instead, they should access the
3211 it might change in the meanwhile. Instead, they should access the
3212 dirstate via wctx.repo().dirstate.
3212 dirstate via wctx.repo().dirstate.
3213
3213
3214 This list is emptied out after each status run -- extensions should
3214 This list is emptied out after each status run -- extensions should
3215 make sure it adds to this list each time dirstate.status is called.
3215 make sure it adds to this list each time dirstate.status is called.
3216 Extensions should also make sure they don't call this for statuses
3216 Extensions should also make sure they don't call this for statuses
3217 that don't involve the dirstate.
3217 that don't involve the dirstate.
3218 """
3218 """
3219
3219
3220 # The list is located here for uniqueness reasons -- it is actually
3220 # The list is located here for uniqueness reasons -- it is actually
3221 # managed by the workingctx, but that isn't unique per-repo.
3221 # managed by the workingctx, but that isn't unique per-repo.
3222 self._postdsstatus.append(ps)
3222 self._postdsstatus.append(ps)
3223
3223
3224 def postdsstatus(self):
3224 def postdsstatus(self):
3225 """Used by workingctx to get the list of post-dirstate-status hooks."""
3225 """Used by workingctx to get the list of post-dirstate-status hooks."""
3226 return self._postdsstatus
3226 return self._postdsstatus
3227
3227
3228 def clearpostdsstatus(self):
3228 def clearpostdsstatus(self):
3229 """Used by workingctx to clear post-dirstate-status hooks."""
3229 """Used by workingctx to clear post-dirstate-status hooks."""
3230 del self._postdsstatus[:]
3230 del self._postdsstatus[:]
3231
3231
3232 def heads(self, start=None):
3232 def heads(self, start=None):
3233 if start is None:
3233 if start is None:
3234 cl = self.changelog
3234 cl = self.changelog
3235 headrevs = reversed(cl.headrevs())
3235 headrevs = reversed(cl.headrevs())
3236 return [cl.node(rev) for rev in headrevs]
3236 return [cl.node(rev) for rev in headrevs]
3237
3237
3238 heads = self.changelog.heads(start)
3238 heads = self.changelog.heads(start)
3239 # sort the output in rev descending order
3239 # sort the output in rev descending order
3240 return sorted(heads, key=self.changelog.rev, reverse=True)
3240 return sorted(heads, key=self.changelog.rev, reverse=True)
3241
3241
3242 def branchheads(self, branch=None, start=None, closed=False):
3242 def branchheads(self, branch=None, start=None, closed=False):
3243 """return a (possibly filtered) list of heads for the given branch
3243 """return a (possibly filtered) list of heads for the given branch
3244
3244
3245 Heads are returned in topological order, from newest to oldest.
3245 Heads are returned in topological order, from newest to oldest.
3246 If branch is None, use the dirstate branch.
3246 If branch is None, use the dirstate branch.
3247 If start is not None, return only heads reachable from start.
3247 If start is not None, return only heads reachable from start.
3248 If closed is True, return heads that are marked as closed as well.
3248 If closed is True, return heads that are marked as closed as well.
3249 """
3249 """
3250 if branch is None:
3250 if branch is None:
3251 branch = self[None].branch()
3251 branch = self[None].branch()
3252 branches = self.branchmap()
3252 branches = self.branchmap()
3253 if not branches.hasbranch(branch):
3253 if not branches.hasbranch(branch):
3254 return []
3254 return []
3255 # the cache returns heads ordered lowest to highest
3255 # the cache returns heads ordered lowest to highest
3256 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3256 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3257 if start is not None:
3257 if start is not None:
3258 # filter out the heads that cannot be reached from startrev
3258 # filter out the heads that cannot be reached from startrev
3259 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3259 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3260 bheads = [h for h in bheads if h in fbheads]
3260 bheads = [h for h in bheads if h in fbheads]
3261 return bheads
3261 return bheads
3262
3262
3263 def branches(self, nodes):
3263 def branches(self, nodes):
3264 if not nodes:
3264 if not nodes:
3265 nodes = [self.changelog.tip()]
3265 nodes = [self.changelog.tip()]
3266 b = []
3266 b = []
3267 for n in nodes:
3267 for n in nodes:
3268 t = n
3268 t = n
3269 while True:
3269 while True:
3270 p = self.changelog.parents(n)
3270 p = self.changelog.parents(n)
3271 if p[1] != self.nullid or p[0] == self.nullid:
3271 if p[1] != self.nullid or p[0] == self.nullid:
3272 b.append((t, n, p[0], p[1]))
3272 b.append((t, n, p[0], p[1]))
3273 break
3273 break
3274 n = p[0]
3274 n = p[0]
3275 return b
3275 return b
3276
3276
3277 def between(self, pairs):
3277 def between(self, pairs):
3278 r = []
3278 r = []
3279
3279
3280 for top, bottom in pairs:
3280 for top, bottom in pairs:
3281 n, l, i = top, [], 0
3281 n, l, i = top, [], 0
3282 f = 1
3282 f = 1
3283
3283
3284 while n != bottom and n != self.nullid:
3284 while n != bottom and n != self.nullid:
3285 p = self.changelog.parents(n)[0]
3285 p = self.changelog.parents(n)[0]
3286 if i == f:
3286 if i == f:
3287 l.append(n)
3287 l.append(n)
3288 f = f * 2
3288 f = f * 2
3289 n = p
3289 n = p
3290 i += 1
3290 i += 1
3291
3291
3292 r.append(l)
3292 r.append(l)
3293
3293
3294 return r
3294 return r
3295
3295
3296 def checkpush(self, pushop):
3296 def checkpush(self, pushop):
3297 """Extensions can override this function if additional checks have
3297 """Extensions can override this function if additional checks have
3298 to be performed before pushing, or call it if they override push
3298 to be performed before pushing, or call it if they override push
3299 command.
3299 command.
3300 """
3300 """
3301
3301
3302 @unfilteredpropertycache
3302 @unfilteredpropertycache
3303 def prepushoutgoinghooks(self):
3303 def prepushoutgoinghooks(self):
3304 """Return util.hooks consists of a pushop with repo, remote, outgoing
3304 """Return util.hooks consists of a pushop with repo, remote, outgoing
3305 methods, which are called before pushing changesets.
3305 methods, which are called before pushing changesets.
3306 """
3306 """
3307 return util.hooks()
3307 return util.hooks()
3308
3308
3309 def pushkey(self, namespace, key, old, new):
3309 def pushkey(self, namespace, key, old, new):
3310 try:
3310 try:
3311 tr = self.currenttransaction()
3311 tr = self.currenttransaction()
3312 hookargs = {}
3312 hookargs = {}
3313 if tr is not None:
3313 if tr is not None:
3314 hookargs.update(tr.hookargs)
3314 hookargs.update(tr.hookargs)
3315 hookargs = pycompat.strkwargs(hookargs)
3315 hookargs = pycompat.strkwargs(hookargs)
3316 hookargs['namespace'] = namespace
3316 hookargs['namespace'] = namespace
3317 hookargs['key'] = key
3317 hookargs['key'] = key
3318 hookargs['old'] = old
3318 hookargs['old'] = old
3319 hookargs['new'] = new
3319 hookargs['new'] = new
3320 self.hook(b'prepushkey', throw=True, **hookargs)
3320 self.hook(b'prepushkey', throw=True, **hookargs)
3321 except error.HookAbort as exc:
3321 except error.HookAbort as exc:
3322 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3322 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3323 if exc.hint:
3323 if exc.hint:
3324 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3324 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3325 return False
3325 return False
3326 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3326 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3327 ret = pushkey.push(self, namespace, key, old, new)
3327 ret = pushkey.push(self, namespace, key, old, new)
3328
3328
3329 def runhook(unused_success):
3329 def runhook(unused_success):
3330 self.hook(
3330 self.hook(
3331 b'pushkey',
3331 b'pushkey',
3332 namespace=namespace,
3332 namespace=namespace,
3333 key=key,
3333 key=key,
3334 old=old,
3334 old=old,
3335 new=new,
3335 new=new,
3336 ret=ret,
3336 ret=ret,
3337 )
3337 )
3338
3338
3339 self._afterlock(runhook)
3339 self._afterlock(runhook)
3340 return ret
3340 return ret
3341
3341
3342 def listkeys(self, namespace):
3342 def listkeys(self, namespace):
3343 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3343 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3344 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3344 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3345 values = pushkey.list(self, namespace)
3345 values = pushkey.list(self, namespace)
3346 self.hook(b'listkeys', namespace=namespace, values=values)
3346 self.hook(b'listkeys', namespace=namespace, values=values)
3347 return values
3347 return values
3348
3348
3349 def debugwireargs(self, one, two, three=None, four=None, five=None):
3349 def debugwireargs(self, one, two, three=None, four=None, five=None):
3350 '''used to test argument passing over the wire'''
3350 '''used to test argument passing over the wire'''
3351 return b"%s %s %s %s %s" % (
3351 return b"%s %s %s %s %s" % (
3352 one,
3352 one,
3353 two,
3353 two,
3354 pycompat.bytestr(three),
3354 pycompat.bytestr(three),
3355 pycompat.bytestr(four),
3355 pycompat.bytestr(four),
3356 pycompat.bytestr(five),
3356 pycompat.bytestr(five),
3357 )
3357 )
3358
3358
3359 def savecommitmessage(self, text):
3359 def savecommitmessage(self, text):
3360 fp = self.vfs(b'last-message.txt', b'wb')
3360 fp = self.vfs(b'last-message.txt', b'wb')
3361 try:
3361 try:
3362 fp.write(text)
3362 fp.write(text)
3363 finally:
3363 finally:
3364 fp.close()
3364 fp.close()
3365 return self.pathto(fp.name[len(self.root) + 1 :])
3365 return self.pathto(fp.name[len(self.root) + 1 :])
3366
3366
3367 def register_wanted_sidedata(self, category):
3367 def register_wanted_sidedata(self, category):
3368 if requirementsmod.REVLOGV2_REQUIREMENT not in self.requirements:
3368 if requirementsmod.REVLOGV2_REQUIREMENT not in self.requirements:
3369 # Only revlogv2 repos can want sidedata.
3369 # Only revlogv2 repos can want sidedata.
3370 return
3370 return
3371 self._wanted_sidedata.add(pycompat.bytestr(category))
3371 self._wanted_sidedata.add(pycompat.bytestr(category))
3372
3372
3373 def register_sidedata_computer(self, kind, category, keys, computer, flags):
3373 def register_sidedata_computer(
3374 self, kind, category, keys, computer, flags, replace=False
3375 ):
3374 if kind not in revlogconst.ALL_KINDS:
3376 if kind not in revlogconst.ALL_KINDS:
3375 msg = _(b"unexpected revlog kind '%s'.")
3377 msg = _(b"unexpected revlog kind '%s'.")
3376 raise error.ProgrammingError(msg % kind)
3378 raise error.ProgrammingError(msg % kind)
3377 category = pycompat.bytestr(category)
3379 category = pycompat.bytestr(category)
3378 if category in self._sidedata_computers.get(kind, []):
3380 already_registered = category in self._sidedata_computers.get(kind, [])
3381 if already_registered and not replace:
3379 msg = _(
3382 msg = _(
3380 b"cannot register a sidedata computer twice for category '%s'."
3383 b"cannot register a sidedata computer twice for category '%s'."
3381 )
3384 )
3382 raise error.ProgrammingError(msg % category)
3385 raise error.ProgrammingError(msg % category)
3386 if replace and not already_registered:
3387 msg = _(
3388 b"cannot replace a sidedata computer that isn't registered "
3389 b"for category '%s'."
3390 )
3391 raise error.ProgrammingError(msg % category)
3383 self._sidedata_computers.setdefault(kind, {})
3392 self._sidedata_computers.setdefault(kind, {})
3384 self._sidedata_computers[kind][category] = (keys, computer, flags)
3393 self._sidedata_computers[kind][category] = (keys, computer, flags)
3385
3394
3386
3395
3387 # used to avoid circular references so destructors work
3396 # used to avoid circular references so destructors work
3388 def aftertrans(files):
3397 def aftertrans(files):
3389 renamefiles = [tuple(t) for t in files]
3398 renamefiles = [tuple(t) for t in files]
3390
3399
3391 def a():
3400 def a():
3392 for vfs, src, dest in renamefiles:
3401 for vfs, src, dest in renamefiles:
3393 # if src and dest refer to a same file, vfs.rename is a no-op,
3402 # if src and dest refer to a same file, vfs.rename is a no-op,
3394 # leaving both src and dest on disk. delete dest to make sure
3403 # leaving both src and dest on disk. delete dest to make sure
3395 # the rename couldn't be such a no-op.
3404 # the rename couldn't be such a no-op.
3396 vfs.tryunlink(dest)
3405 vfs.tryunlink(dest)
3397 try:
3406 try:
3398 vfs.rename(src, dest)
3407 vfs.rename(src, dest)
3399 except OSError: # journal file does not yet exist
3408 except OSError: # journal file does not yet exist
3400 pass
3409 pass
3401
3410
3402 return a
3411 return a
3403
3412
3404
3413
def undoname(fn):
    """Map a journal file path to the matching undo file path.

    Only the first ``journal`` occurrence in the basename is rewritten, so
    e.g. ``.hg/store/journal.phaseroots`` becomes ``.hg/store/undo.phaseroots``.
    The basename is required to start with ``journal``.
    """
    directory, basename = os.path.split(fn)
    assert basename.startswith(b'journal')
    return os.path.join(directory, basename.replace(b'journal', b'undo', 1))
3409
3418
3410
3419
def instance(ui, path, create, intents=None, createopts=None):
    """Open — and, when ``create`` is true, first create — the local
    repository at ``path``.

    ``createopts`` is forwarded to repository creation; ``intents`` is
    forwarded to the repository constructor.
    """
    localpath = urlutil.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)
    return makelocalrepository(ui, localpath, intents=intents)
3417
3426
3418
3427
def islocal(path):
    """Repositories handled by this module are always local."""
    return True
3421
3430
3422
3431
def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    opts = dict(createopts or {})

    if b'backend' not in opts:
        # experimental config: storage.new-repo-backend
        opts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return opts
3436
3445
3437
3446
def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    # A repository sharing its store inherits the source repository's
    # requirements wholesale, plus the matching "shared" marker.
    if b'sharedrepo' in createopts:
        requirements = set(createopts[b'sharedrepo'].requirements)
        if createopts.get(b'sharedrelative'):
            requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
        else:
            requirements.add(requirementsmod.SHARED_REQUIREMENT)

        return requirements

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    # Only the revlogv1 backend is understood by this implementation.
    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
    # fncache depends on store, and dotencode depends on fncache, hence
    # the nested configuration checks.
    if ui.configbool(b'format', b'usestore'):
        requirements.add(requirementsmod.STORE_REQUIREMENT)
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)

    # Pick the first configured compression engine that is available and
    # able to emit a revlog header; abort if none qualifies.
    configured = ui.configlist(b'format', b'revlog-compression')
    for engine_name in configured:
        if engine_name in util.compengines:
            engine = util.compengines[engine_name]
            if engine.available() and engine.revlogheader():
                break
    else:
        raise error.Abort(
            _(
                b'compression engines %s defined by '
                b'format.revlog-compression not available'
            )
            % b', '.join(b'"%s"' % e for e in configured),
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    if engine_name == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif engine_name != b'zlib':
        requirements.add(b'exp-compression-%s' % engine_name)

    if scmutil.gdinitconfig(ui):
        requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)

    # Side-data support requires the revlogv2 format, which supersedes
    # revlogv1.
    # experimental config: format.exp-use-side-data
    if ui.configbool(b'format', b'exp-use-side-data'):
        requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
        requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
        requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
        requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)

    v2_config = ui.config(b'experimental', b'revlogv2')
    if v2_config == b'enable-unstable-format-and-corrupt-my-data':
        requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
        # generaldelta is implied by revlogv2.
        requirements.discard(requirementsmod.GENERALDELTA_REQUIREMENT)
        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'internal-phase'):
        requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)

    if createopts.get(b'narrowfiles'):
        requirements.add(requirementsmod.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

    if ui.configbool(b'format', b'use-persistent-nodemap'):
        requirements.add(requirementsmod.NODEMAP_REQUIREMENT)

    # if share-safe is enabled, let's create the new repository with the new
    # requirement
    if ui.configbool(b'format', b'use-share-safe'):
        requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return requirements
3550
3559
3551
3560
def checkrequirementscompat(ui, requirements):
    """Checks compatibility of repository requirements enabled and disabled.

    Returns a set of requirements which needs to be dropped because dependent
    requirements are not enabled. Also warns users about it."""

    dropped = set()

    # All checks below only matter when the store itself is disabled; a
    # repository with a store is compatible with everything checked here.
    if requirementsmod.STORE_REQUIREMENT not in requirements:
        if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
            # bookmarks-in-store needs a store to put the bookmarks in.
            # BUGFIX: the warning previously misspelled "because" as
            # "beacuse".
            ui.warn(
                _(
                    b'ignoring enabled \'format.bookmarks-in-store\' config '
                    b'because it is incompatible with disabled '
                    b'\'format.usestore\' config\n'
                )
            )
            dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

        # Sharing is storage sharing, so a store-less source cannot be
        # shared at all: hard error rather than a dropped requirement.
        if (
            requirementsmod.SHARED_REQUIREMENT in requirements
            or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
        ):
            raise error.Abort(
                _(
                    b"cannot create shared repository as source was created"
                    b" with 'format.usestore' config disabled"
                )
            )

        if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
            # share-safe keeps requirements in the store, which is absent.
            ui.warn(
                _(
                    b"ignoring enabled 'format.use-share-safe' config because "
                    b"it is incompatible with disabled 'format.usestore'"
                    b" config\n"
                )
            )
            dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return dropped
3593
3602
3594
3603
def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    recognized = {
        b'backend',
        b'lfs',
        b'narrowfiles',
        b'sharedrepo',
        b'sharedrelative',
        b'shareditems',
        b'shallowfilestore',
    }

    return {
        option: value
        for option, value in createopts.items()
        if option not in recognized
    }
3620
3629
3621
3630
def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    The following keys for ``createopts`` are recognized:

    backend
        The storage backend to use.
    lfs
        Repository will be created with ``lfs`` requirement. The lfs extension
        will automatically be loaded when the repository is accessed.
    narrowfiles
        Set up repository to support narrow file storage.
    sharedrepo
        Repository object from which storage should be shared.
    sharedrelative
        Boolean indicating if the path to the shared repo should be
        stored as relative. By default, the pointer to the "parent" repo
        is stored as an absolute path.
    shareditems
        Set of items to share to the new repository (in addition to storage).
    shallowfilestore
        Indicates that storage for files should be shallow (not all ancestor
        revisions are known).
    """
    createopts = defaultcreateopts(ui, createopts=createopts)

    # Any creation option nobody claimed to understand is fatal.
    unknown = filterknowncreateopts(ui, createopts)
    if not isinstance(unknown, dict):
        raise error.ProgrammingError(
            b'filterknowncreateopts() did not return a dict'
        )
    if unknown:
        raise error.Abort(
            _(
                b'unable to create repository because of unknown '
                b'creation option: %s'
            )
            % b', '.join(sorted(unknown)),
            hint=_(b'is a required extension not loaded?'),
        )

    requirements = newreporequirements(ui, createopts=createopts)
    requirements -= checkrequirementscompat(ui, requirements)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_(b'repository %s already exists') % path)

    if b'sharedrepo' in createopts:
        sharedpath = createopts[b'sharedrepo'].sharedpath

        if createopts.get(b'sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
                sharedpath = util.pconvert(sharedpath)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
        hgvfs.mkdir(b'wcache')

    has_store = requirementsmod.STORE_REQUIREMENT in requirements
    if has_store and b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 65535, which won't be recognized by
        # such old clients.
        hgvfs.append(
            b'00changelog.i',
            b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
            b'layout',
        )

    # Split the requirements between working-copy and store flavors and
    # write the working-copy ones.
    wcreq, storereq = scmutil.filterrequirements(requirements)
    scmutil.writerequires(hgvfs, wcreq)
    # A new shared repository finds its store requirements in the source
    # store, so store requirements are only written for non-shared repos.
    if storereq and b'sharedrepo' not in createopts:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
        scmutil.writerequires(storevfs, storereq)

    # Write out file telling readers where to find the shared store.
    if b'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get(b'shareditems'):
        shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)
3737
3746
3738
3747
def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Let the instance clean up after itself first.
    repo.close()

    # Strategy: swap the object's type for one where every attribute
    # lookup raises.  close() stays callable because some repository
    # constructors call close() on references they hold.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError(
                b'repo instances should not be used after unshare'
            )

        def close(self):
            pass

    # A repoview intercepts __setattr__, so assign the class at the
    # lowest level possible to bypass any such interception.
    object.__setattr__(repo, '__class__', poisonedrepository)
General Comments 0
You need to be logged in to leave comments. Login now