repository: introduce constant for sparse repo requirement and use it...
Pulkit Goyal - r45914:a1f51c7d default

# repository.py - Interfaces and base classes for repositories and peers.
#
# Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from ..i18n import _
from .. import error
from . import util as interfaceutil

# When narrowing is finalized and no longer subject to format changes,
# we should move this to just "narrow" or similar.
NARROW_REQUIREMENT = b'narrowhg-experimental'

# Enables sparse working directory usage
SPARSE_REQUIREMENT = b'exp-sparse'

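# Example (illustrative sketch, not part of this change): code that needs a
# sparse-aware path typically tests the repo's requirements set, e.g.:
#
#   if SPARSE_REQUIREMENT in repo.requirements:
#       ...  # sparse-specific handling
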
# Local repository feature string.

# Revlogs are being used for file storage.
REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
# The storage part of the repository is shared from an external source.
REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
# LFS supported for backing file storage.
REPO_FEATURE_LFS = b'lfs'
# Repository supports being stream cloned.
REPO_FEATURE_STREAM_CLONE = b'streamclone'
# Files storage may lack data for all ancestors.
REPO_FEATURE_SHALLOW_FILE_STORAGE = b'shallowfilestorage'

REVISION_FLAG_CENSORED = 1 << 15
REVISION_FLAG_ELLIPSIS = 1 << 14
REVISION_FLAG_EXTSTORED = 1 << 13
REVISION_FLAG_SIDEDATA = 1 << 12

REVISION_FLAGS_KNOWN = (
    REVISION_FLAG_CENSORED
    | REVISION_FLAG_ELLIPSIS
    | REVISION_FLAG_EXTSTORED
    | REVISION_FLAG_SIDEDATA
)

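# Example (illustrative sketch): storage code can reject unsupported flags
# with plain bitwise arithmetic, e.g.:
#
#   unknown = flags & ~REVISION_FLAGS_KNOWN
#   if unknown:
#       raise error.Abort(b'unsupported revision flags: %d' % unknown)
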
CG_DELTAMODE_STD = b'default'
CG_DELTAMODE_PREV = b'previous'
CG_DELTAMODE_FULL = b'fulltext'
CG_DELTAMODE_P1 = b'p1'


class ipeerconnection(interfaceutil.Interface):
    """Represents a "connection" to a repository.

    This is the base interface for representing a connection to a repository.
    It holds basic properties and methods applicable to all peer types.

    This is not a complete interface definition and should not be used
    outside of this module.
    """

    ui = interfaceutil.Attribute("""ui.ui instance""")

    def url():
        """Returns a URL string representing this peer.

        Currently, implementations expose the raw URL used to construct the
        instance. It may contain credentials as part of the URL. The
        expectations of the value aren't well-defined and this could lead to
        data leakage.

        TODO audit/clean consumers and more clearly define the contents of this
        value.
        """

    def local():
        """Returns a local repository instance.

        If the peer represents a local repository, returns an object that
        can be used to interface with it. Otherwise returns ``None``.
        """

    def peer():
        """Returns an object conforming to this interface.

        Most implementations will ``return self``.
        """

    def canpush():
        """Returns a boolean indicating if this peer can be pushed to."""

    def close():
        """Close the connection to this peer.

        This is called when the peer will no longer be used. Resources
        associated with the peer should be cleaned up.
        """


class ipeercapabilities(interfaceutil.Interface):
    """Peer sub-interface related to capabilities."""

    def capable(name):
        """Determine support for a named capability.

        Returns ``False`` if capability not supported.

        Returns ``True`` if boolean capability is supported. Returns a string
        if capability support is non-boolean.

        Capability strings may or may not map to wire protocol capabilities.
        """

    def requirecap(name, purpose):
        """Require a capability to be present.

        Raises a ``CapabilityError`` if the capability isn't present.
        """


class ipeercommands(interfaceutil.Interface):
    """Client-side interface for communicating over the wire protocol.

    This interface is used as a gateway to the Mercurial wire protocol.
    Methods commonly call wire protocol commands of the same name.
123 """
126 """
124
127
125 def branchmap():
128 def branchmap():
126 """Obtain heads in named branches.
129 """Obtain heads in named branches.
127
130
128 Returns a dict mapping branch name to an iterable of nodes that are
131 Returns a dict mapping branch name to an iterable of nodes that are
129 heads on that branch.
132 heads on that branch.
130 """
133 """
131
134
132 def capabilities():
135 def capabilities():
133 """Obtain capabilities of the peer.
136 """Obtain capabilities of the peer.
134
137
135 Returns a set of string capabilities.
138 Returns a set of string capabilities.
136 """
139 """
137
140
138 def clonebundles():
141 def clonebundles():
139 """Obtains the clone bundles manifest for the repo.
142 """Obtains the clone bundles manifest for the repo.
140
143
141 Returns the manifest as unparsed bytes.
144 Returns the manifest as unparsed bytes.
142 """
145 """
143
146
144 def debugwireargs(one, two, three=None, four=None, five=None):
147 def debugwireargs(one, two, three=None, four=None, five=None):
145 """Used to facilitate debugging of arguments passed over the wire."""
148 """Used to facilitate debugging of arguments passed over the wire."""
146
149
147 def getbundle(source, **kwargs):
150 def getbundle(source, **kwargs):
148 """Obtain remote repository data as a bundle.
151 """Obtain remote repository data as a bundle.
149
152
150 This command is how the bulk of repository data is transferred from
153 This command is how the bulk of repository data is transferred from
        the peer to the local repository.

        Returns a generator of bundle data.
        """

    def heads():
        """Determine all known head revisions in the peer.

        Returns an iterable of binary nodes.
        """

    def known(nodes):
        """Determine whether multiple nodes are known.

        Accepts an iterable of nodes whose presence to check for.

        Returns an iterable of booleans indicating whether the corresponding
        node at that index is known to the peer.
169 """
172 """
170
173
171 def listkeys(namespace):
174 def listkeys(namespace):
172 """Obtain all keys in a pushkey namespace.
175 """Obtain all keys in a pushkey namespace.
173
176
174 Returns an iterable of key names.
177 Returns an iterable of key names.
175 """
178 """
176
179
177 def lookup(key):
180 def lookup(key):
178 """Resolve a value to a known revision.
181 """Resolve a value to a known revision.
179
182
180 Returns a binary node of the resolved revision on success.
183 Returns a binary node of the resolved revision on success.
181 """
184 """
182
185
183 def pushkey(namespace, key, old, new):
186 def pushkey(namespace, key, old, new):
184 """Set a value using the ``pushkey`` protocol.
187 """Set a value using the ``pushkey`` protocol.
185
188
186 Arguments correspond to the pushkey namespace and key to operate on and
189 Arguments correspond to the pushkey namespace and key to operate on and
187 the old and new values for that key.
190 the old and new values for that key.
188
191
189 Returns a string with the peer result. The value inside varies by the
192 Returns a string with the peer result. The value inside varies by the
190 namespace.
193 namespace.
191 """
194 """
192
195
193 def stream_out():
196 def stream_out():
194 """Obtain streaming clone data.
197 """Obtain streaming clone data.
195
198
196 Successful result should be a generator of data chunks.
199 Successful result should be a generator of data chunks.
197 """
200 """
198
201
199 def unbundle(bundle, heads, url):
202 def unbundle(bundle, heads, url):
200 """Transfer repository data to the peer.
203 """Transfer repository data to the peer.
201
204
202 This is how the bulk of data during a push is transferred.
205 This is how the bulk of data during a push is transferred.
203
206
204 Returns the integer number of heads added to the peer.
207 Returns the integer number of heads added to the peer.
205 """
208 """
206
209
207
210
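# Example (illustrative sketch): reading bookmarks over the wire through the
# ``listkeys`` pushkey namespace, using a command executor (defined below):
#
#   with remote.commandexecutor() as e:
#       marks = e.callcommand(b'listkeys', {b'namespace': b'bookmarks'})
#   bookmarks = marks.result()

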
class ipeerlegacycommands(interfaceutil.Interface):
    """Interface for implementing support for legacy wire protocol commands.

    Wire protocol commands transition to legacy status when they are no longer
    used by modern clients. To facilitate identifying which commands are
    legacy, the interfaces are split.
    """

    def between(pairs):
        """Obtain nodes between pairs of nodes.

        ``pairs`` is an iterable of node pairs.

        Returns an iterable of iterables of nodes corresponding to each
        requested pair.
        """

    def branches(nodes):
        """Obtain ancestor changesets of specific nodes back to a branch point.

        For each requested node, the peer finds the first ancestor node that is
        a DAG root or is a merge.

        Returns an iterable of iterables with the resolved values for each node.
        """

    def changegroup(nodes, source):
        """Obtain a changegroup with data for descendants of specified nodes."""

    def changegroupsubset(bases, heads, source):
        pass


class ipeercommandexecutor(interfaceutil.Interface):
    """Represents a mechanism to execute remote commands.

    This is the primary interface for requesting that wire protocol commands
    be executed. Instances of this interface are active in a context manager
    and have a well-defined lifetime. When the context manager exits, all
    outstanding requests are waited on.
    """

    def callcommand(name, args):
        """Request that a named command be executed.

        Receives the command name and a dictionary of command arguments.

        Returns a ``concurrent.futures.Future`` that will resolve to the
        result of that command request. That exact value is left up to
        the implementation and possibly varies by command.

        Not all commands can coexist with other commands in an executor
        instance: it depends on the underlying wire protocol transport being
        used and the command itself.

        Implementations MAY call ``sendcommands()`` automatically if the
        requested command cannot coexist with other commands in this executor.

        Implementations MAY call ``sendcommands()`` automatically when the
        future's ``result()`` is called. So, consumers using multiple
        commands with an executor MUST ensure that ``result()`` is not called
        until all command requests have been issued.
        """

    def sendcommands():
        """Trigger submission of queued command requests.

        Not all transports submit commands as soon as they are requested to
        run. When called, this method forces queued command requests to be
        issued. It will no-op if all commands have already been sent.

        When called, no more new commands may be issued with this executor.
        """

    def close():
        """Signal that this command request is finished.

        When called, no more new commands may be issued. All outstanding
        commands that have previously been issued are waited on before
        returning. This not only includes waiting for the futures to resolve,
        but also waiting for all response data to arrive. In other words,
        calling this waits for all on-wire state for issued command requests
        to finish.

        When used as a context manager, this method is called when exiting the
        context manager.

        This method may call ``sendcommands()`` if there are buffered commands.
        """


class ipeerrequests(interfaceutil.Interface):
    """Interface for executing commands on a peer."""

    limitedarguments = interfaceutil.Attribute(
303 """True if the peer cannot receive large argument value for commands."""
306 """True if the peer cannot receive large argument value for commands."""
    )

    def commandexecutor():
        """A context manager that resolves to an ipeercommandexecutor.

        The object this resolves to can be used to issue command requests
        to the peer.

        Callers should call its ``callcommand`` method to issue command
        requests.

        A new executor should be obtained for each distinct set of commands
        (possibly just a single command) that the consumer wants to execute
        as part of a single operation or round trip. This is because some
        peers are half-duplex and/or don't support persistent connections.
        e.g. in the case of HTTP peers, commands sent to an executor represent
        a single HTTP request. While some peers may support multiple command
        sends over the wire per executor, consumers need to code to the least
        capable peer. So it should be assumed that command executors buffer
        called commands until they are told to send them and that each
        command executor could result in a new connection or wire-level request
        being issued.
        """


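# Example (illustrative sketch): issuing two commands on one executor and
# resolving the futures only after both requests have been queued:
#
#   with remote.commandexecutor() as e:
#       fheads = e.callcommand(b'heads', {})
#       fknown = e.callcommand(b'known', {b'nodes': [somenode]})
#   heads = fheads.result()
#   known = fknown.result()

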
class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
    """Unified interface for peer repositories.

    All peer instances must conform to this interface.
    """


class ipeerv2(ipeerconnection, ipeercapabilities, ipeerrequests):
    """Unified peer interface for wire protocol version 2 peers."""

    apidescriptor = interfaceutil.Attribute(
        """Data structure holding description of server API."""
    )


@interfaceutil.implementer(ipeerbase)
class peer(object):
    """Base class for peer repositories."""

    limitedarguments = False

    def capable(self, name):
        caps = self.capabilities()
        if name in caps:
            return True

        name = b'%s=' % name
        for cap in caps:
            if cap.startswith(name):
                return cap[len(name) :]

        return False

    def requirecap(self, name, purpose):
        if self.capable(name):
            return

        raise error.CapabilityError(
            _(
                b'cannot %s; remote repository does not support the '
                b'\'%s\' capability'
            )
            % (purpose, name)
        )


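# Example (hypothetical sketch): callers typically gate features on
# capabilities before using them, e.g.:
#
#   remote.requirecap(b'unbundle', _(b'push data to remote'))  # may raise
#   if remote.capable(b'bundle2'):
#       ...  # negotiate a bundle2 push
#   # value-carrying capabilities return their payload as a string:
#   formats = remote.capable(b'unbundle')  # e.g. b'HG10GZ,HG10BZ,HG10UN'

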
class iverifyproblem(interfaceutil.Interface):
    """Represents a problem with the integrity of the repository.

    Instances of this interface are emitted to describe an integrity issue
    with a repository (e.g. corrupt storage, missing data, etc).

    Instances are essentially messages associated with severity.
    """

    warning = interfaceutil.Attribute(
        """Message indicating a non-fatal problem."""
    )

    error = interfaceutil.Attribute("""Message indicating a fatal problem.""")

    node = interfaceutil.Attribute(
        """Revision encountering the problem.

        ``None`` means the problem doesn't apply to a single revision.
        """
    )


class irevisiondelta(interfaceutil.Interface):
    """Represents a delta between one revision and another.

    Instances convey enough information to allow a revision to be exchanged
    with another repository.

    Instances represent the fulltext revision data or a delta against
    another revision. Therefore the ``revision`` and ``delta`` attributes
    are mutually exclusive.

    Typically used for changegroup generation.
    """

    node = interfaceutil.Attribute("""20 byte node of this revision.""")

    p1node = interfaceutil.Attribute(
        """20 byte node of 1st parent of this revision."""
    )

    p2node = interfaceutil.Attribute(
        """20 byte node of 2nd parent of this revision."""
    )

    linknode = interfaceutil.Attribute(
        """20 byte node of the changelog revision this node is linked to."""
    )

    flags = interfaceutil.Attribute(
        """2 bytes of integer flags that apply to this revision.

        This is a bitwise composition of the ``REVISION_FLAG_*`` constants.
        """
    )

    basenode = interfaceutil.Attribute(
        """20 byte node of the revision this data is a delta against.

        ``nullid`` indicates that the revision is a full revision and not
        a delta.
        """
    )

    baserevisionsize = interfaceutil.Attribute(
        """Size of base revision this delta is against.

        May be ``None`` if ``basenode`` is ``nullid``.
        """
    )

    revision = interfaceutil.Attribute(
        """Raw fulltext of revision data for this node."""
    )

    delta = interfaceutil.Attribute(
        """Delta between ``basenode`` and ``node``.

        Stored in the bdiff delta format.
        """
    )


class ifilerevisionssequence(interfaceutil.Interface):
    """Contains index data for all revisions of a file.

    Types implementing this behave like lists of tuples. The index
    in the list corresponds to the revision number. The values contain
    index metadata.

    The *null* revision (revision number -1) is always the last item
    in the index.
    """

    def __len__():
        """The total number of revisions."""

    def __getitem__(rev):
        """Returns the object having a specific revision number.

        Returns an 8-tuple with the following fields:

        offset+flags
            Contains the offset and flags for the revision. 64-bit unsigned
            integer where first 6 bytes are the offset and the next 2 bytes
            are flags. The offset can be 0 if it is not used by the store.
        compressed size
            Size of the revision data in the store. It can be 0 if it isn't
            needed by the store.
        uncompressed size
            Fulltext size. It can be 0 if it isn't needed by the store.
        base revision
            Revision number of revision the delta for storage is encoded
            against. -1 indicates not encoded against a base revision.
        link revision
            Revision number of changelog revision this entry is related to.
        p1 revision
            Revision number of 1st parent. -1 if no 1st parent.
        p2 revision
            Revision number of 2nd parent. -1 if no 2nd parent.
        node
            Binary node value for this revision number.

        Negative values should index off the end of the sequence. ``-1``
        should return the null revision. ``-2`` should return the most
        recent revision.
        """

    def __contains__(rev):
        """Whether a revision number exists."""

    def insert(self, i, entry):
        """Add an item to the index at specific revision."""


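# Example (illustrative): an index entry for a revision whose storage delta
# is encoded against revision 5 and which is linked to changelog revision 42
# could look like:
#
#   (offset_and_flags, comp_size, uncomp_size, 5, 42, 4, -1, node)
#
# where revision 4 is the 1st parent and there is no 2nd parent.

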
class ifileindex(interfaceutil.Interface):
    """Storage interface for index data of a single file.

    File storage data is divided into index metadata and data storage.
    This interface defines the index portion of the interface.

    The index logically consists of:

    * A mapping between revision numbers and nodes.
    * DAG data (storing and querying the relationship between nodes).
    * Metadata to facilitate storage.
    """

    def __len__():
        """Obtain the number of revisions stored for this file."""

    def __iter__():
        """Iterate over revision numbers for this file."""

    def hasnode(node):
        """Returns a bool indicating if a node is known to this store.

        Implementations must only return True for full, binary node values:
        hex nodes, revision numbers, and partial node matches must be
        rejected.

        The null node is never present.
        """

    def revs(start=0, stop=None):
        """Iterate over revision numbers for this file, with control."""

    def parents(node):
        """Returns a 2-tuple of parent nodes for a revision.

        Values will be ``nullid`` if the parent is empty.
        """

    def parentrevs(rev):
        """Like parents() but operates on revision numbers."""

    def rev(node):
        """Obtain the revision number given a node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def node(rev):
        """Obtain the node value given a revision number.

        Raises ``IndexError`` if the node is not known.
        """

    def lookup(node):
        """Attempt to resolve a value to a node.

        Value can be a binary node, hex node, revision number, or a string
        that can be converted to an integer.

        Raises ``error.LookupError`` if a node could not be resolved.
        """

    def linkrev(rev):
        """Obtain the changeset revision number a revision is linked to."""

    def iscensored(rev):
        """Return whether a revision's content has been censored."""

    def commonancestorsheads(node1, node2):
        """Obtain an iterable of nodes containing heads of common ancestors.

        See ``ancestor.commonancestorsheads()``.
        """

    def descendants(revs):
        """Obtain descendant revision numbers for a set of revision numbers.

        If ``nullrev`` is in the set, this is equivalent to ``revs()``.
        """

    def heads(start=None, stop=None):
        """Obtain a list of nodes that are DAG heads, with control.

        The set of revisions examined can be limited by specifying
        ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
        iterable of nodes. DAG traversal starts at earlier revision
        ``start`` and iterates forward until any node in ``stop`` is
        encountered.
        """

    def children(node):
        """Obtain nodes that are children of a node.

        Returns a list of nodes.
        """


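# Example (illustrative sketch): round-tripping between nodes and revision
# numbers with an ``ifileindex`` implementation:
#
#   rev = fl.rev(node)          # raises error.LookupError if unknown
#   assert fl.node(rev) == node
#   p1, p2 = fl.parents(node)   # ``nullid`` for absent parents

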
class ifiledata(interfaceutil.Interface):
    """Storage interface for data storage of a specific file.

    This complements ``ifileindex`` and provides an interface for accessing
    data for a tracked file.
    """

    def size(rev):
        """Obtain the fulltext size of file data.

        Any metadata is excluded from size measurements.
        """

    def revision(node, raw=False):
622 """"Obtain fulltext data for a node.
625 """"Obtain fulltext data for a node.

        By default, any storage transformations are applied before the data
        is returned. If ``raw`` is True, non-raw storage transformations
        are not applied.

        The fulltext data may contain a header containing metadata. Most
        consumers should use ``read()`` to obtain the actual file data.
        """

    def rawdata(node):
        """Obtain raw data for a node."""

    def read(node):
        """Resolve file fulltext data.

        This is similar to ``revision()`` except any metadata in the data
        headers is stripped.
        """

    def renamed(node):
        """Obtain copy metadata for a node.

        Returns ``False`` if no copy metadata is stored or a 2-tuple of
        (path, node) from which this revision was copied.
        """

    def cmp(node, fulltext):
        """Compare fulltext to another revision.

        Returns True if the fulltext is different from what is stored.

        This takes copy metadata into account.

        TODO better document the copy metadata and censoring logic.
        """

    def emitrevisions(
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
        deltamode=CG_DELTAMODE_STD,
    ):
        """Produce ``irevisiondelta`` for revisions.

        Given an iterable of nodes, emits objects conforming to the
        ``irevisiondelta`` interface that describe revisions in storage.

        This method is a generator.

        The input nodes may be unordered. Implementations must ensure that a
        node's parents are emitted before the node itself. Transitively, this
        means that a node may only be emitted once all its ancestors in
        ``nodes`` have also been emitted.

        By default, emits "index" data (the ``node``, ``p1node``, and
        ``p2node`` attributes). If ``revisiondata`` is set, revision data
        will also be present on the emitted objects.

        With default argument values, implementations can choose to emit
        either fulltext revision data or a delta. When emitting deltas,
        implementations must consider whether the delta's base revision
        fulltext is available to the receiver.

        The base revision fulltext is guaranteed to be available if any of
        the following are met:

        * Its fulltext revision was emitted by this method call.
        * A delta for that revision was emitted by this method call.
        * ``assumehaveparentrevisions`` is True and the base revision is a
          parent of the node.

        ``nodesorder`` can be used to control the order that revisions are
        emitted. By default, revisions can be reordered as long as they are
        in DAG topological order (see above). If the value is ``nodes``,
        the iteration order from ``nodes`` should be used. If the value is
        ``storage``, then the native order from the backing storage layer
        is used. (Not all storage layers will have strong ordering and behavior
        of this mode is storage-dependent.) ``nodes`` ordering can force
        revisions to be emitted before their ancestors, so consumers should
        use it with care.

        The ``linknode`` attribute on the returned ``irevisiondelta`` may not
        be set and it is the caller's responsibility to resolve it, if needed.

        If ``deltamode`` is CG_DELTAMODE_PREV and revision data is requested,
        all revision data should be emitted as deltas against the revision
        emitted just prior. The initial revision should be a delta against its
        1st parent.
        """


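# Example (hypothetical consumer sketch):
#
#   for delta in fl.emitrevisions(nodes, revisiondata=True,
#                                 assumehaveparentrevisions=True):
#       if delta.revision is not None:
#           apply_fulltext(delta.node, delta.revision)            # hypothetical
#       else:
#           apply_delta(delta.node, delta.basenode, delta.delta)  # hypothetical

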
class ifilemutation(interfaceutil.Interface):
    """Storage interface for mutation events of a tracked file."""

    def add(filedata, meta, transaction, linkrev, p1, p2):
        """Add a new revision to the store.

        Takes file data, dictionary of metadata, a transaction, linkrev,
        and parent nodes.

        Returns the node that was added.

        May no-op if a revision matching the supplied data is already stored.
        """

    def addrevision(
        revisiondata,
        transaction,
        linkrev,
        p1,
        p2,
        node=None,
        flags=0,
        cachedelta=None,
    ):
        """Add a new revision to the store.

        This is similar to ``add()`` except it operates at a lower level.

        The data passed in already contains a metadata header, if any.

        ``node`` and ``flags`` can be used to define the expected node and
        the flags to use with storage. ``flags`` is a bitwise value composed
        of the various ``REVISION_FLAG_*`` constants.

        ``add()`` is usually called when adding files from e.g. the working
        directory. ``addrevision()`` is often called by ``add()`` and for
        scenarios where revision data has already been computed, such as when
        applying raw data from a peer repo.
        """

    def addgroup(
        deltas,
        linkmapper,
        transaction,
        addrevisioncb=None,
        maybemissingparents=False,
    ):
        """Process a series of deltas for storage.

        ``deltas`` is an iterable of 7-tuples of
        (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
        to add.

        The ``delta`` field contains ``mpatch`` data to apply to a base
        revision, identified by ``deltabase``. The base node can be
        ``nullid``, in which case the header from the delta can be ignored
        and the delta used as the fulltext.

        ``addrevisioncb`` should be called for each node as it is committed.

        ``maybemissingparents`` is a bool indicating whether the incoming
        data may reference parents/ancestor revisions that aren't present.
        This flag is set when receiving data into a "shallow" store that
        doesn't hold all history.

        Returns a list of nodes that were processed. A node will be in the list
        even if it existed in the store previously.
        """

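    # Example (illustrative): a single entry in ``deltas`` whose base is
    # ``nullid``, meaning the delta body can be used directly as the fulltext:
    #
    #   (node, p1, p2, linknode, nullid, delta_or_fulltext, 0)
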
    def censorrevision(tr, node, tombstone=b''):
        """Remove the content of a single revision.

        The specified ``node`` will have its content purged from storage.
        Future attempts to access the revision data for this node will
        result in failure.

        A ``tombstone`` message can optionally be stored. This message may be
        displayed to users when they attempt to access the missing revision
        data.

        Storage backends may have stored deltas against the previous content
        in this revision. As part of censoring a revision, these storage
        backends are expected to rewrite any internally stored deltas such
        that they no longer reference the deleted content.
        """

    def getstrippoint(minlink):
        """Find the minimum revision that must be stripped to strip a linkrev.

        Returns a 2-tuple containing the minimum revision number and a set
        of all revisions numbers that would be broken by this strip.

        TODO this is highly revlog centric and should be abstracted into
        a higher-level deletion API. ``repair.strip()`` relies on this.
        """

    def strip(minlink, transaction):
        """Remove storage of items starting at a linkrev.

        This uses ``getstrippoint()`` to determine the first node to remove.
        Then it effectively truncates storage for all revisions after that.

        TODO this is highly revlog centric and should be abstracted into a
        higher-level deletion API.
        """


class ifilestorage(ifileindex, ifiledata, ifilemutation):
    """Complete storage interface for a single tracked file."""

    def files():
        """Obtain paths that are backing storage for this file.

        TODO this is used heavily by verify code and there should probably
        be a better API for that.
        """

    def storageinfo(
        exclusivefiles=False,
        sharedfiles=False,
        revisionscount=False,
        trackedsize=False,
        storedsize=False,
    ):
        """Obtain information about storage for this file's data.

        Returns a dict describing storage for this tracked path. The keys
        in the dict map to arguments of the same name. The arguments are bools
        indicating whether to calculate and obtain that data.

        exclusivefiles
            Iterable of (vfs, path) describing files that are exclusively
            used to back storage for this tracked path.

        sharedfiles
            Iterable of (vfs, path) describing files that are used to back
            storage for this tracked path. Those files may also provide storage
            for other stored entities.

        revisionscount
            Number of revisions available for retrieval.

        trackedsize
            Total size in bytes of all tracked revisions. This is a sum of the
            length of the fulltext of all revisions.

        storedsize
            Total size in bytes used to store data for all tracked revisions.
            This is commonly less than ``trackedsize`` due to internal usage
            of deltas rather than fulltext revisions.

        Not all storage backends may support all queries or have a reasonable
        value to use. In that case, the value should be set to ``None`` and
        callers are expected to handle this special value.
        """

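    # Example (illustrative; exact keys and values are backend-dependent):
    #
    #   info = fl.storageinfo(revisionscount=True, storedsize=True)
    #   # e.g. {b'revisionscount': 42, b'storedsize': 16384}; a value may be
    #   # None when the backend cannot compute it.
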
872 def verifyintegrity(state):
875 def verifyintegrity(state):
873 """Verifies the integrity of file storage.
876 """Verifies the integrity of file storage.
874
877
875 ``state`` is a dict holding state of the verifier process. It can be
878 ``state`` is a dict holding state of the verifier process. It can be
876 used to communicate data between invocations of multiple storage
879 used to communicate data between invocations of multiple storage
877 primitives.
880 primitives.
878
881
879 If individual revisions cannot have their revision content resolved,
882 If individual revisions cannot have their revision content resolved,
880 the method is expected to set the ``skipread`` key to a set of nodes
883 the method is expected to set the ``skipread`` key to a set of nodes
881 that encountered problems. If set, the method can also add the node(s)
884 that encountered problems. If set, the method can also add the node(s)
882 to ``safe_renamed`` in order to indicate nodes that may perform the
885 to ``safe_renamed`` in order to indicate nodes that may perform the
883 rename checks with currently accessible data.
886 rename checks with currently accessible data.
884
887
885 The method yields objects conforming to the ``iverifyproblem``
888 The method yields objects conforming to the ``iverifyproblem``
886 interface.
889 interface.
887 """
890 """
888
891
889
892
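# A minimal sketch (not part of the interface) of how verify-like code might
# drive ``verifyintegrity()``. The ``fl`` argument and ``warn`` callback are
# hypothetical; only the ``state`` keys named in the docstring above are
# assumed.
def _example_verifyintegrity_usage(fl, warn):
    state = {b'skipread': set(), b'safe_renamed': set()}
    for problem in fl.verifyintegrity(state):
        # Each yielded object conforms to ``iverifyproblem``.
        warn(problem.warning)
    # Nodes whose content could not be resolved.
    return state[b'skipread']

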
class idirs(interfaceutil.Interface):
    """Interface representing a collection of directories from paths.

    This interface is essentially a derived data structure representing
    directories from a collection of paths.
    """

    def addpath(path):
        """Add a path to the collection.

        All directories in the path will be added to the collection.
        """

    def delpath(path):
        """Remove a path from the collection.

        If the removal was the last path in a particular directory, the
        directory is removed from the collection.
        """

    def __iter__():
        """Iterate over the directories in this collection of paths."""

    def __contains__(path):
        """Whether a specific directory is in this collection."""


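# Hypothetical usage sketch for an ``idirs`` implementation: directories are
# effectively reference-counted by the paths added beneath them (an inference
# from the ``delpath()`` docstring above).
def _example_idirs_usage(dirs):
    dirs.addpath(b'a/b/c.txt')  # adds directories b'a' and b'a/b'
    assert b'a/b' in dirs
    dirs.delpath(b'a/b/c.txt')  # last path under b'a/b'; the dir drops out
    assert b'a/b' not in dirs

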
class imanifestdict(interfaceutil.Interface):
    """Interface representing a manifest data structure.

    A manifest is effectively a dict mapping paths to entries. Each entry
    consists of a binary node and extra flags affecting that entry.
    """

    def __getitem__(path):
        """Returns the binary node value for a path in the manifest.

        Raises ``KeyError`` if the path does not exist in the manifest.

        Equivalent to ``self.find(path)[0]``.
        """

    def find(path):
        """Returns the entry for a path in the manifest.

        Returns a 2-tuple of (node, flags).

        Raises ``KeyError`` if the path does not exist in the manifest.
        """

    def __len__():
        """Return the number of entries in the manifest."""

    def __nonzero__():
        """Returns True if the manifest has entries, False otherwise."""

    __bool__ = __nonzero__

    def __setitem__(path, node):
        """Define the node value for a path in the manifest.

        If the path is already in the manifest, its flags will be copied to
        the new entry.
        """

    def __contains__(path):
        """Whether a path exists in the manifest."""

    def __delitem__(path):
        """Remove a path from the manifest.

        Raises ``KeyError`` if the path is not in the manifest.
        """

    def __iter__():
        """Iterate over paths in the manifest."""

    def iterkeys():
        """Iterate over paths in the manifest."""

    def keys():
        """Obtain a list of paths in the manifest."""

    def filesnotin(other, match=None):
        """Obtain the set of paths in this manifest but not in another.

        ``match`` is an optional matcher function to be applied to both
        manifests.

        Returns a set of paths.
        """

    def dirs():
        """Returns an object implementing the ``idirs`` interface."""

    def hasdir(dir):
        """Returns a bool indicating if a directory is in this manifest."""

    def walk(match):
        """Generator of paths in manifest satisfying a matcher.

        If the matcher has explicit files listed and they don't exist in
        the manifest, ``match.bad()`` is called for each missing file.
        """

    def diff(other, match=None, clean=False):
        """Find differences between this manifest and another.

        This manifest is compared to ``other``.

        If ``match`` is provided, the two manifests are filtered against this
        matcher and only entries satisfying the matcher are compared.

        If ``clean`` is True, unchanged files are included in the returned
        object.

        Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
        the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
        represents the node and flags for this manifest and ``(node2, flag2)``
        are the same for the other manifest.
        """
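
    # Sketch of consuming a ``diff()`` result (caller code assumed, not part
    # of the interface). A common convention, assumed here, is a ``None``
    # node on the side that lacks the path:
    #
    #   for path, ((n1, fl1), (n2, fl2)) in m1.diff(m2).items():
    #       if n1 is None:
    #           pass  # path only present in the other manifest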

    def setflag(path, flag):
        """Set the flag value for a given path.

        Raises ``KeyError`` if the path is not already in the manifest.
        """

    def get(path, default=None):
        """Obtain the node value for a path or a default value if missing."""

    def flags(path):
        """Return the flags value for a path (default: empty bytestring)."""

    def copy():
        """Return a copy of this manifest."""

    def items():
        """Returns an iterable of (path, node) for items in this manifest."""

    def iteritems():
        """Identical to items()."""

    def iterentries():
        """Returns an iterable of (path, node, flags) for this manifest.

        Similar to ``iteritems()`` except items are a 3-tuple and include
        flags.
        """

    def text():
        """Obtain the raw data representation for this manifest.

        Result is used to create a manifest revision.
        """

    def fastdelta(base, changes):
        """Obtain a delta between this manifest and another given changes.

        ``base`` is the raw data representation of another manifest.

        ``changes`` is an iterable of ``(path, to_delete)``.

        Returns a 2-tuple containing ``bytearray(self.text())`` and the
        delta between ``base`` and this manifest.

        If this manifest implementation can't support ``fastdelta()``,
        raise ``mercurial.manifest.FastdeltaUnavailable``.
        """


class imanifestrevisionbase(interfaceutil.Interface):
    """Base interface representing a single revision of a manifest.

    Should not be used as a primary interface: should always be inherited
    as part of a larger interface.
    """

    def copy():
        """Obtain a copy of this manifest instance.

        Returns an object conforming to the ``imanifestrevisionwritable``
        interface. The instance will be associated with the same
        ``imanifestlog`` collection as this instance.
        """

    def read():
        """Obtain the parsed manifest data structure.

        The returned object conforms to the ``imanifestdict`` interface.
        """


class imanifestrevisionstored(imanifestrevisionbase):
    """Interface representing a manifest revision committed to storage."""

    def node():
        """The binary node for this manifest."""

    parents = interfaceutil.Attribute(
        """List of binary nodes that are parents for this manifest revision."""
    )

    def readdelta(shallow=False):
        """Obtain the manifest data structure representing changes from parent.

        This manifest is compared to its first parent. A new manifest
        representing those differences is constructed.

        The returned object conforms to the ``imanifestdict`` interface.
        """

    def readfast(shallow=False):
        """Calls either ``read()`` or ``readdelta()``.

        The faster of the two options is called.
        """

    def find(key):
        """Calls ``self.read().find(key)``.

        Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
        """


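# A minimal sketch, assuming ``mctx`` implements ``imanifestrevisionstored``:
# use ``readfast()`` when either a full manifest or a delta against the first
# parent is acceptable, letting the implementation pick the cheaper option.
def _example_read_manifest(mctx, full=False):
    if full:
        return mctx.read()  # complete ``imanifestdict``
    return mctx.readfast(shallow=True)  # ``read()`` or ``readdelta()``

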
class imanifestrevisionwritable(imanifestrevisionbase):
    """Interface representing a manifest revision that can be committed."""

    def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
        """Add this revision to storage.

        Takes a transaction object, the changeset revision number it will
        be associated with, its parent nodes, and lists of added and
        removed paths.

        If ``match`` is provided, storage can choose not to inspect or write
        out items that do not match. Storage is still required to be able to
        provide the full manifest in the future for any directories written
        (these manifests should not be "narrowed on disk").

        Returns the binary node of the created revision.
        """


class imanifeststorage(interfaceutil.Interface):
    """Storage interface for manifest data."""

    tree = interfaceutil.Attribute(
        """The path to the directory this manifest tracks.

        The empty bytestring represents the root manifest.
        """
    )

    index = interfaceutil.Attribute(
        """An ``ifilerevisionssequence`` instance."""
    )

    indexfile = interfaceutil.Attribute(
        """Path of revlog index file.

        TODO this is revlog specific and should not be exposed.
        """
    )

    opener = interfaceutil.Attribute(
        """VFS opener to use to access underlying files used for storage.

        TODO this is revlog specific and should not be exposed.
        """
    )

    version = interfaceutil.Attribute(
        """Revlog version number.

        TODO this is revlog specific and should not be exposed.
        """
    )

    _generaldelta = interfaceutil.Attribute(
        """Whether generaldelta storage is being used.

        TODO this is revlog specific and should not be exposed.
        """
    )

    fulltextcache = interfaceutil.Attribute(
        """Dict with cache of fulltexts.

        TODO this doesn't feel appropriate for the storage interface.
        """
    )

    def __len__():
        """Obtain the number of revisions stored for this manifest."""

    def __iter__():
        """Iterate over revision numbers for this manifest."""

    def rev(node):
        """Obtain the revision number given a binary node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def node(rev):
        """Obtain the node value given a revision number.

        Raises ``error.LookupError`` if the revision is not known.
        """

    def lookup(value):
        """Attempt to resolve a value to a node.

        Value can be a binary node, hex node, revision number, or a bytes
        that can be converted to an integer.

        Raises ``error.LookupError`` if a node could not be resolved.
        """

    def parents(node):
        """Returns a 2-tuple of parent nodes for a node.

        Values will be ``nullid`` if the parent is empty.
        """

    def parentrevs(rev):
        """Like parents() but operates on revision numbers."""

    def linkrev(rev):
        """Obtain the changeset revision number a revision is linked to."""

    def revision(node, _df=None, raw=False):
        """Obtain fulltext data for a node."""

    def rawdata(node, _df=None):
        """Obtain raw data for a node."""

    def revdiff(rev1, rev2):
        """Obtain a delta between two revision numbers.

        The returned data is the result of ``bdiff.bdiff()`` on the raw
        revision data.
        """

    def cmp(node, fulltext):
        """Compare fulltext to another revision.

        Returns True if the fulltext is different from what is stored.
        """

    def emitrevisions(
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
    ):
        """Produce ``irevisiondelta`` describing revisions.

        See the documentation for ``ifiledata`` for more.
        """

    def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
        """Process a series of deltas for storage.

        See the documentation in ``ifilemutation`` for more.
        """

    def rawsize(rev):
        """Obtain the size of tracked data.

        Is equivalent to ``len(m.rawdata(node))``.

        TODO this method is only used by upgrade code and may be removed.
        """

    def getstrippoint(minlink):
        """Find the minimum revision that must be stripped to strip a linkrev.

        See the documentation in ``ifilemutation`` for more.
        """

    def strip(minlink, transaction):
        """Remove storage of items starting at a linkrev.

        See the documentation in ``ifilemutation`` for more.
        """

    def checksize():
        """Obtain the expected sizes of backing files.

        TODO this is used by verify and it should not be part of the interface.
        """

    def files():
        """Obtain paths that are backing storage for this manifest.

        TODO this is used by verify and there should probably be a better API
        for this functionality.
        """

    def deltaparent(rev):
        """Obtain the revision that a revision is delta'd against.

        TODO delta encoding is an implementation detail of storage and should
        not be exposed to the storage interface.
        """

    def clone(tr, dest, **kwargs):
        """Clone this instance to another."""

    def clearcaches(clear_persisted_data=False):
        """Clear any caches associated with this instance."""

    def dirlog(d):
        """Obtain a manifest storage instance for a tree."""

    def add(
        m, transaction, link, p1, p2, added, removed, readtree=None, match=None
    ):
        """Add a revision to storage.

        ``m`` is an object conforming to ``imanifestdict``.

        ``link`` is the linkrev revision number.

        ``p1`` and ``p2`` are the parent revision numbers.

        ``added`` and ``removed`` are iterables of added and removed paths,
        respectively.

        ``readtree`` is a function that can be used to read the child tree(s)
        when recursively writing the full tree structure when using
        treemanifests.

        ``match`` is a matcher that can be used to hint to storage that not all
        paths must be inspected; this is an optimization and can be safely
        ignored. Note that the storage must still be able to reproduce a full
        manifest including files that did not match.
        """

    def storageinfo(
        exclusivefiles=False,
        sharedfiles=False,
        revisionscount=False,
        trackedsize=False,
        storedsize=False,
    ):
        """Obtain information about storage for this manifest's data.

        See ``ifilestorage.storageinfo()`` for a description of this method.
        This one behaves the same way, except for manifest data.
        """


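# Sketch (illustrative only) of walking an ``imanifeststorage``: revision
# numbers iterate densely, and each maps to a node whose fulltext and parents
# can be queried.
def _example_walk_manifest_storage(store):
    for rev in store:
        node = store.node(rev)
        p1rev, p2rev = store.parentrevs(rev)
        data = store.revision(node)  # fulltext bytes for the node
        yield rev, node, p1rev, p2rev, len(data)

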
class imanifestlog(interfaceutil.Interface):
    """Interface representing a collection of manifest snapshots.

    Represents the root manifest in a repository.

    Also serves as a means to access nested tree manifests and to cache
    tree manifests.
    """

    def __getitem__(node):
        """Obtain a manifest instance for a given binary node.

        Equivalent to calling ``self.get('', node)``.

        The returned object conforms to the ``imanifestrevisionstored``
        interface.
        """

    def get(tree, node, verify=True):
        """Retrieve the manifest instance for a given directory and binary node.

        ``node`` always refers to the node of the root manifest (which will be
        the only manifest if flat manifests are being used).

        If ``tree`` is the empty string, the root manifest is returned.
        Otherwise the manifest for the specified directory will be returned
        (requires tree manifests).

        If ``verify`` is True, ``LookupError`` is raised if the node is not
        known.

        The returned object conforms to the ``imanifestrevisionstored``
        interface.
        """

    def getstorage(tree):
        """Retrieve an interface to storage for a particular tree.

        If ``tree`` is the empty bytestring, storage for the root manifest will
        be returned. Otherwise storage for a tree manifest is returned.

        TODO formalize interface for returned object.
        """

    def clearcaches():
        """Clear caches associated with this collection."""

    def rev(node):
        """Obtain the revision number for a binary node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def update_caches(transaction):
        """Update whatever caches are relevant for the used storage."""


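# A minimal sketch, assuming ``mlog`` implements ``imanifestlog``: fetch the
# parsed root manifest for a binary node, plus storage for one subtree (the
# latter only makes sense with tree manifests; the directory is illustrative).
def _example_manifestlog_usage(mlog, node):
    rootmf = mlog[node].read()  # same as mlog.get(b'', node).read()
    treestore = mlog.getstorage(b'subdir/')
    return rootmf, treestore

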
class ilocalrepositoryfilestorage(interfaceutil.Interface):
    """Local repository sub-interface providing access to tracked file storage.

    This interface defines how a repository accesses storage for a single
    tracked file path.
    """

    def file(f):
        """Obtain a filelog for a tracked path.

        The returned type conforms to the ``ifilestorage`` interface.
        """


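# Sketch of the single entry point this sub-interface defines: ``file()``
# returns an ``ifilestorage`` for one tracked path (the path is illustrative).
def _example_file_storage(repo):
    fl = repo.file(b'README')
    return len(fl)  # number of stored revisions, via ``ifileindex``

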
class ilocalrepositorymain(interfaceutil.Interface):
    """Main interface for local repositories.

    This currently captures the reality of things - not how things should be.
    """

    supportedformats = interfaceutil.Attribute(
        """Set of requirements that apply to stream clone.

        This is actually a class attribute and is shared among all instances.
        """
    )

    supported = interfaceutil.Attribute(
        """Set of requirements that this repo is capable of opening."""
    )

    requirements = interfaceutil.Attribute(
        """Set of requirements this repo uses."""
    )

    features = interfaceutil.Attribute(
        """Set of "features" this repository supports.

        A "feature" is a loosely-defined term. It can refer to a feature
        in the classical sense or can describe an implementation detail
        of the repository. For example, a ``readonly`` feature may denote
        the repository as read-only. Or a ``revlogfilestore`` feature may
        denote that the repository is using revlogs for file storage.

        The intent of features is to provide a machine-queryable mechanism
        for repo consumers to test for various repository characteristics.

        Features are similar to ``requirements``. The main difference is that
        requirements are stored on-disk and represent requirements to open the
        repository. Features are more run-time capabilities of the repository
        and more granular capabilities (which may be derived from requirements).
        """
    )
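
    # Sketch: ``requirements`` gates opening/on-disk format decisions while
    # ``features`` answers runtime capability checks, e.g. (using constants
    # defined at the top of this module):
    #
    #   if SPARSE_REQUIREMENT in repo.requirements:
    #       pass  # repository uses a sparse working directory
    #   if REPO_FEATURE_STREAM_CLONE in repo.features:
    #       pass  # repository can be stream cloned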

    filtername = interfaceutil.Attribute(
        """Name of the repoview that is active on this repo."""
    )

    wvfs = interfaceutil.Attribute(
        """VFS used to access the working directory."""
    )

    vfs = interfaceutil.Attribute(
        """VFS rooted at the .hg directory.

        Used to access repository data not in the store.
        """
    )

    svfs = interfaceutil.Attribute(
        """VFS rooted at the store.

        Used to access repository data in the store. Typically .hg/store.
        But can point elsewhere if the store is shared.
        """
    )

    root = interfaceutil.Attribute(
        """Path to the root of the working directory."""
    )

    path = interfaceutil.Attribute("""Path to the .hg directory.""")

    origroot = interfaceutil.Attribute(
        """The filesystem path that was used to construct the repo."""
    )

    auditor = interfaceutil.Attribute(
        """A pathauditor for the working directory.

        This checks if a path refers to a nested repository.

        Operates on the filesystem.
        """
    )

    nofsauditor = interfaceutil.Attribute(
        """A pathauditor for the working directory.

        This is like ``auditor`` except it doesn't do filesystem checks.
        """
    )

    baseui = interfaceutil.Attribute(
        """Original ui instance passed into constructor."""
    )

    ui = interfaceutil.Attribute("""Main ui instance for this instance.""")

    sharedpath = interfaceutil.Attribute(
        """Path to the .hg directory of the repo this repo was shared from."""
    )

    store = interfaceutil.Attribute("""A store instance.""")

    spath = interfaceutil.Attribute("""Path to the store.""")

    sjoin = interfaceutil.Attribute("""Alias to self.store.join.""")

    cachevfs = interfaceutil.Attribute(
        """A VFS used to access the cache directory.

        Typically .hg/cache.
        """
    )

    wcachevfs = interfaceutil.Attribute(
        """A VFS used to access the cache directory dedicated to the working
        copy.

        Typically .hg/wcache.
        """
    )

    filteredrevcache = interfaceutil.Attribute(
        """Holds sets of revisions to be filtered."""
    )

    names = interfaceutil.Attribute("""A ``namespaces`` instance.""")

    filecopiesmode = interfaceutil.Attribute(
        """The way file copies should be dealt with in this repo."""
    )

    def close():
        """Close the handle on this repository."""

    def peer():
        """Obtain an object conforming to the ``peer`` interface."""

    def unfiltered():
        """Obtain an unfiltered/raw view of this repo."""

    def filtered(name, visibilityexceptions=None):
        """Obtain a named view of this repository."""

    obsstore = interfaceutil.Attribute("""A store of obsolescence data.""")

    changelog = interfaceutil.Attribute("""A handle on the changelog revlog.""")

    manifestlog = interfaceutil.Attribute(
        """An instance conforming to the ``imanifestlog`` interface.

        Provides access to manifests for the repository.
        """
    )

    dirstate = interfaceutil.Attribute("""Working directory state.""")

    narrowpats = interfaceutil.Attribute(
        """Matcher patterns for this repository's narrowspec."""
    )

    def narrowmatch(match=None, includeexact=False):
        """Obtain a matcher for the narrowspec."""

    def setnarrowpats(newincludes, newexcludes):
        """Define the narrowspec for this repository."""

    def __getitem__(changeid):
        """Try to resolve a changectx."""

    def __contains__(changeid):
        """Whether a changeset exists."""

    def __nonzero__():
        """Always returns True."""
        return True

    __bool__ = __nonzero__

    def __len__():
        """Returns the number of changesets in the repo."""

    def __iter__():
        """Iterate over revisions in the changelog."""

    def revs(expr, *args):
        """Evaluate a revset.

        Emits revisions.
        """

    def set(expr, *args):
        """Evaluate a revset.

        Emits changectx instances.
        """
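
    # Sketch (assumed caller code): ``revs()`` emits integer revisions and
    # ``set()`` emits changectx objects for the same revset language:
    #
    #   for rev in repo.revs(b'draft() and branch(%s)', branchname):
    #       pass
    #   for ctx in repo.set(b'heads(all())'):
    #       pass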

    def anyrevs(specs, user=False, localalias=None):
        """Find revisions matching one of the given revsets."""

    def url():
        """Returns a string representing the location of this repo."""

    def hook(name, throw=False, **args):
        """Call a hook."""

    def tags():
        """Return a mapping of tag to node."""

    def tagtype(tagname):
        """Return the type of a given tag."""

    def tagslist():
        """Return a list of tags ordered by revision."""

    def nodetags(node):
        """Return the tags associated with a node."""

    def nodebookmarks(node):
        """Return the list of bookmarks pointing to the specified node."""

    def branchmap():
        """Return a mapping of branch to heads in that branch."""

    def revbranchcache():
        pass

    def branchtip(branchtip, ignoremissing=False):
        """Return the tip node for a given branch."""

    def lookup(key):
        """Resolve the node for a revision."""

    def lookupbranch(key):
        """Look up the branch name of the given revision or branch name."""

    def known(nodes):
        """Determine whether a series of nodes is known.

        Returns a list of bools.
        """

    def local():
        """Whether the repository is local."""
        return True

    def publishing():
        """Whether the repository is a publishing repository."""

    def cancopy():
        pass

    def shared():
        """The type of shared repository or None."""

    def wjoin(f, *insidef):
        """Calls self.vfs.reljoin(self.root, f, *insidef)"""

    def setparents(p1, p2):
        """Set the parent nodes of the working directory."""

    def filectx(path, changeid=None, fileid=None):
        """Obtain a filectx for the given file revision."""

    def getcwd():
        """Obtain the current working directory from the dirstate."""

    def pathto(f, cwd=None):
        """Obtain the relative path to a file."""

    def adddatafilter(name, fltr):
        pass

    def wread(filename):
        """Read a file from wvfs, using data filters."""

    def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
        """Write data to a file in the wvfs, using data filters."""

    def wwritedata(filename, data):
        """Resolve data for writing to the wvfs, using data filters."""

    def currenttransaction():
        """Obtain the current transaction instance or None."""

    def transaction(desc, report=None):
        """Open a new transaction to write to the repository."""
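
    # Sketch of the usual write discipline (an assumption drawn from the
    # docstrings here, not a normative recipe): hold the store lock, then
    # use the transaction as a context manager so it closes or aborts:
    #
    #   with repo.lock():
    #       with repo.transaction(b'example') as tr:
    #           pass  # mutate the store under ``tr``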

    def undofiles():
        """Returns a list of (vfs, path) for files to undo transactions."""

    def recover():
        """Roll back an interrupted transaction."""

    def rollback(dryrun=False, force=False):
        """Undo the last transaction.

        DANGEROUS.
        """

    def updatecaches(tr=None, full=False):
        """Warm repo caches."""

    def invalidatecaches():
        """Invalidate cached data due to the repository mutating."""

    def invalidatevolatilesets():
        pass

    def invalidatedirstate():
        """Invalidate the dirstate."""

    def invalidate(clearfilecache=False):
        pass

    def invalidateall():
        pass

    def lock(wait=True):
        """Lock the repository store and return a lock instance."""

    def wlock(wait=True):
        """Lock the non-store parts of the repository."""

    def currentwlock():
        """Return the wlock if it's held or None."""

    def checkcommitpatterns(wctx, match, status, fail):
        pass

    def commit(
        text=b'',
        user=None,
        date=None,
        match=None,
        force=False,
        editor=False,
        extra=None,
    ):
        """Add a new revision to the repository."""
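
    # Sketch (illustrative arguments): committing working directory changes
    # returns the binary node of the new changeset:
    #
    #   node = repo.commit(text=b'fix parser', user=b'alice <a@example.com>')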

    def commitctx(ctx, error=False, origctx=None):
        """Commit a commitctx instance to the repository."""

    def destroying():
        """Inform the repository that nodes are about to be destroyed."""

    def destroyed():
        """Inform the repository that nodes have been destroyed."""

    def status(
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        """Convenience method to call repo[x].status()."""

    def addpostdsstatus(ps):
        pass

    def postdsstatus():
        pass

    def clearpostdsstatus():
        pass

    def heads(start=None):
        """Obtain list of nodes that are DAG heads."""

    def branchheads(branch=None, start=None, closed=False):
        pass

    def branches(nodes):
        pass

    def between(pairs):
        pass

    def checkpush(pushop):
        pass

    prepushoutgoinghooks = interfaceutil.Attribute("""util.hooks instance.""")

    def pushkey(namespace, key, old, new):
        pass

    def listkeys(namespace):
        pass

    def debugwireargs(one, two, three=None, four=None, five=None):
        pass

    def savecommitmessage(text):
        pass


class completelocalrepository(
    ilocalrepositorymain, ilocalrepositoryfilestorage
):
    """Complete interface for a local repository."""


class iwireprotocolcommandcacher(interfaceutil.Interface):
    """Represents a caching backend for wire protocol commands.

    Wire protocol version 2 supports transparent caching of many commands.
    To leverage this caching, servers can activate objects that cache
    command responses. Objects handle both cache writing and reading.
    This interface defines how that response caching mechanism works.

    Wire protocol version 2 commands emit a series of objects that are
    serialized and sent to the client. The caching layer exists between
    the invocation of the command function and the sending of its output
    objects to an output layer.

    Instances of this interface represent a binding to a cache that
    can serve a response (in place of calling a command function) and/or
    write responses to a cache for subsequent use.

    When a command request arrives, the following happens with regard
    to this interface:

    1. The server determines whether the command request is cacheable.
    2. If it is, an instance of this interface is spawned.
    3. The cacher is activated in a context manager (``__enter__`` is called).
    4. A cache *key* for that request is derived. This will call the
       instance's ``adjustcachekeystate()`` method so the derivation
       can be influenced.
    5. The cacher is informed of the derived cache key via a call to
       ``setcachekey()``.
    6. The cacher's ``lookup()`` method is called to test for presence of
       the derived key in the cache.
    7. If ``lookup()`` returns a hit, that cached result is used in place
       of invoking the command function. ``__exit__`` is called and the
       instance is discarded.
    8. The command function is invoked.
    9. ``onobject()`` is called for each object emitted by the command
       function.
    10. After the final object is seen, ``onfinished()`` is called.
    11. ``__exit__`` is called to signal the end of use of the instance.
1857
1860
1858 Cache *key* derivation can be influenced by the instance.
1861 Cache *key* derivation can be influenced by the instance.
1859
1862
1860 Cache keys are initially derived by a deterministic representation of
1863 Cache keys are initially derived by a deterministic representation of
1861 the command request. This includes the command name, arguments, protocol
1864 the command request. This includes the command name, arguments, protocol
1862 version, etc. This initial key derivation is performed by CBOR-encoding a
1865 version, etc. This initial key derivation is performed by CBOR-encoding a
1863 data structure and feeding that output into a hasher.
1866 data structure and feeding that output into a hasher.
1864
1867
1865 Instances of this interface can influence this initial key derivation
1868 Instances of this interface can influence this initial key derivation
1866 via ``adjustcachekeystate()``.
1869 via ``adjustcachekeystate()``.
1867
1870
1868 The instance is informed of the derived cache key via a call to
1871 The instance is informed of the derived cache key via a call to
1869 ``setcachekey()``. The instance must store the key locally so it can
1872 ``setcachekey()``. The instance must store the key locally so it can
1870 be consulted on subsequent operations that may require it.
1873 be consulted on subsequent operations that may require it.
1871
1874
1872 When constructed, the instance has access to a callable that can be used
1875 When constructed, the instance has access to a callable that can be used
1873 for encoding response objects. This callable receives as its single
1876 for encoding response objects. This callable receives as its single
1874 argument an object emitted by a command function. It returns an iterable
1877 argument an object emitted by a command function. It returns an iterable
1875 of bytes chunks representing the encoded object. Unless the cacher is
1878 of bytes chunks representing the encoded object. Unless the cacher is
1876 caching native Python objects in memory or has a way of reconstructing
1879 caching native Python objects in memory or has a way of reconstructing
1877 the original Python objects, implementations typically call this function
1880 the original Python objects, implementations typically call this function
1878 to produce bytes from the output objects and then store those bytes in
1881 to produce bytes from the output objects and then store those bytes in
1879 the cache. When it comes time to re-emit those bytes, they are wrapped
1882 the cache. When it comes time to re-emit those bytes, they are wrapped
1880 in a ``wireprototypes.encodedresponse`` instance to tell the output
1883 in a ``wireprototypes.encodedresponse`` instance to tell the output
1881 layer that they are pre-encoded.
1884 layer that they are pre-encoded.
1882
1885
1883 When receiving the objects emitted by the command function, instances
1886 When receiving the objects emitted by the command function, instances
1884 can choose what to do with those objects. The simplest thing to do is
1887 can choose what to do with those objects. The simplest thing to do is
1885 re-emit the original objects. They will be forwarded to the output
1888 re-emit the original objects. They will be forwarded to the output
1886 layer and will be processed as if the cacher did not exist.
1889 layer and will be processed as if the cacher did not exist.
1887
1890
1888 Implementations could also choose to not emit objects - instead locally
1891 Implementations could also choose to not emit objects - instead locally
1889 buffering objects or their encoded representation. They could then emit
1892 buffering objects or their encoded representation. They could then emit
1890 a single "coalesced" object when ``onfinished()`` is called. In
1893 a single "coalesced" object when ``onfinished()`` is called. In
1891 this way, the implementation would function as a filtering layer of
1894 this way, the implementation would function as a filtering layer of
1892 sorts.
1895 sorts.
1893
1896
1894 When caching objects, typically the encoded form of the object will
1897 When caching objects, typically the encoded form of the object will
1895 be stored. Keep in mind that if the original object is forwarded to
1898 be stored. Keep in mind that if the original object is forwarded to
1896 the output layer, it will need to be encoded there as well. For large
1899 the output layer, it will need to be encoded there as well. For large
1897 output, this redundant encoding could add overhead. Implementations
1900 output, this redundant encoding could add overhead. Implementations
1898 could wrap the encoded object data in ``wireprototypes.encodedresponse``
1901 could wrap the encoded object data in ``wireprototypes.encodedresponse``
1899 instances to avoid this overhead.
1902 instances to avoid this overhead.
1900 """
1903 """
1901
1904
1902 def __enter__():
1905 def __enter__():
1903 """Marks the instance as active.
1906 """Marks the instance as active.
1904
1907
1905 Should return self.
1908 Should return self.
1906 """
1909 """
1907
1910
1908 def __exit__(exctype, excvalue, exctb):
1911 def __exit__(exctype, excvalue, exctb):
1909 """Called when cacher is no longer used.
1912 """Called when cacher is no longer used.
1910
1913
1911 This can be used by implementations to perform cleanup actions (e.g.
1914 This can be used by implementations to perform cleanup actions (e.g.
1912 disconnecting network sockets, aborting a partially cached response.
1915 disconnecting network sockets, aborting a partially cached response.
1913 """
1916 """
1914
1917
1915 def adjustcachekeystate(state):
1918 def adjustcachekeystate(state):
1916 """Influences cache key derivation by adjusting state to derive key.
1919 """Influences cache key derivation by adjusting state to derive key.
1917
1920
1918 A dict defining the state used to derive the cache key is passed.
1921 A dict defining the state used to derive the cache key is passed.
1919
1922
1920 Implementations can modify this dict to record additional state that
1923 Implementations can modify this dict to record additional state that
1921 is wanted to influence key derivation.
1924 is wanted to influence key derivation.
1922
1925
1923 Implementations are *highly* encouraged to not modify or delete
1926 Implementations are *highly* encouraged to not modify or delete
1924 existing keys.
1927 existing keys.
1925 """
1928 """
1926
1929
1927 def setcachekey(key):
1930 def setcachekey(key):
1928 """Record the derived cache key for this request.
1931 """Record the derived cache key for this request.
1929
1932
1930 Instances may mutate the key for internal usage, as desired. e.g.
1933 Instances may mutate the key for internal usage, as desired. e.g.
1931 instances may wish to prepend the repo name, introduce path
1934 instances may wish to prepend the repo name, introduce path
1932 components for filesystem or URL addressing, etc. Behavior is up to
1935 components for filesystem or URL addressing, etc. Behavior is up to
1933 the cache.
1936 the cache.
1934
1937
1935 Returns a bool indicating if the request is cacheable by this
1938 Returns a bool indicating if the request is cacheable by this
1936 instance.
1939 instance.
1937 """
1940 """
1938
1941
1939 def lookup():
1942 def lookup():
1940 """Attempt to resolve an entry in the cache.
1943 """Attempt to resolve an entry in the cache.
1941
1944
1942 The instance is instructed to look for the cache key that it was
1945 The instance is instructed to look for the cache key that it was
1943 informed about via the call to ``setcachekey()``.
1946 informed about via the call to ``setcachekey()``.
1944
1947
1945 If there's no cache hit or the cacher doesn't wish to use the cached
1948 If there's no cache hit or the cacher doesn't wish to use the cached
1946 entry, ``None`` should be returned.
1949 entry, ``None`` should be returned.
1947
1950
1948 Else, a dict defining the cached result should be returned. The
1951 Else, a dict defining the cached result should be returned. The
1949 dict may have the following keys:
1952 dict may have the following keys:
1950
1953
1951 objs
1954 objs
1952 An iterable of objects that should be sent to the client. That
1955 An iterable of objects that should be sent to the client. That
1953 iterable of objects is expected to be what the command function
1956 iterable of objects is expected to be what the command function
1954 would return if invoked or an equivalent representation thereof.
1957 would return if invoked or an equivalent representation thereof.
1955 """
1958 """
1956
1959
1957 def onobject(obj):
1960 def onobject(obj):
1958 """Called when a new object is emitted from the command function.
1961 """Called when a new object is emitted from the command function.
1959
1962
1960 Receives as its argument the object that was emitted from the
1963 Receives as its argument the object that was emitted from the
1961 command function.
1964 command function.
1962
1965
1963 This method returns an iterator of objects to forward to the output
1966 This method returns an iterator of objects to forward to the output
1964 layer. The easiest implementation is a generator that just
1967 layer. The easiest implementation is a generator that just
1965 ``yield obj``.
1968 ``yield obj``.
1966 """
1969 """
1967
1970
1968 def onfinished():
1971 def onfinished():
1969 """Called after all objects have been emitted from the command function.
1972 """Called after all objects have been emitted from the command function.
1970
1973
1971 Implementations should return an iterator of objects to forward to
1974 Implementations should return an iterator of objects to forward to
1972 the output layer.
1975 the output layer.
1973
1976
1974 This method can be a generator.
1977 This method can be a generator.
1975 """
1978 """
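
To make the lifecycle above concrete, here is a minimal sketch of a conforming cacher that keeps encoded responses in a process-local dict. The class and its storage strategy are illustrative only; the method names, the encoder callable received at construction, and the ``wireprototypes.encodedresponse`` wrapper all come from the description above:

from mercurial import wireprototypes


class memorycacher(object):
    """Sketch: cache pre-encoded command responses in a dict."""

    _store = {}  # hypothetical process-local cache, keyed by cache key

    def __init__(self, encodefn):
        self._encodefn = encodefn  # encoder callable received at construction
        self._key = None
        self._buffered = []

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        # Persist the buffered response only on a clean exit.
        if exctype is None and self._key is not None and self._buffered:
            self._store[self._key] = b''.join(self._buffered)

    def adjustcachekeystate(self, state):
        pass  # accept the default key derivation state unchanged

    def setcachekey(self, key):
        self._key = key
        return True  # this sketch treats every request as cacheable

    def lookup(self):
        data = self._store.get(self._key)
        if data is None:
            return None
        # Wrap pre-encoded bytes so the output layer skips re-encoding.
        return {b'objs': [wireprototypes.encodedresponse(data)]}

    def onobject(self, obj):
        # Buffer the encoded form for caching; forward the original object.
        self._buffered.extend(self._encodefn(obj))
        yield obj

    def onfinished(self):
        return []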
@@ -1,3530 +1,3530 b''
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    short,
)
from .pycompat import (
    delattr,
    getattr,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    color,
    commit,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    match as matchmod,
    mergestate as mergestatemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    rcutil,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)

from .interfaces import (
    repository,
    util as interfaceutil,
)

from .utils import (
    hashutil,
    procutil,
    stringutil,
)

from .revlogutils import constants as revlogconst

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()


class _basefilecache(scmutil.filecache):
    """All filecache usage on repo is done for logic that should be unfiltered.
    """

    def __get__(self, repo, type=None):
        if repo is None:
            return self
        # proxy to unfiltered __dict__ since filtered repo has no entry
        unfi = repo.unfiltered()
        try:
            return unfi.__dict__[self.sname]
        except KeyError:
            pass
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        return super(_basefilecache, self).set(repo.unfiltered(), value)


class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""

    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)


class storecache(_basefilecache):
    """filecache for files in the store"""

    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b''))

    def join(self, obj, fname):
        return obj.sjoin(fname)


class mixedrepostorecache(_basefilecache):
    """filecache for a mix of files in .hg/store and outside"""

    def __init__(self, *pathsandlocations):
        # scmutil.filecache only uses the path for passing back into our
        # join(), so we can safely pass a list of paths and locations
        super(mixedrepostorecache, self).__init__(*pathsandlocations)
        _cachedfiles.update(pathsandlocations)

    def join(self, obj, fnameandlocation):
        fname, location = fnameandlocation
        if location == b'plain':
            return obj.vfs.join(fname)
        else:
            if location != b'':
                raise error.ProgrammingError(
                    b'unexpected location: %s' % location
                )
            return obj.sjoin(fname)


def isfilecached(repo, name):
    """check if a repo has already cached "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True

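These descriptors are used as decorators on the repository class. A hedged sketch of the pattern (the ``changelog`` property mirrors how the repository class uses ``storecache``, but the surrounding class and the ``_makechangelog`` helper are hypothetical stand-ins):

class examplerepo(object):
    # vfs/sjoin plumbing and the real constructor are elided

    @storecache(b'00changelog.i')
    def changelog(self):
        # recomputed only when .hg/store/00changelog.i changes on disk
        return self._makechangelog()

# isfilecached() peeks at the cache without triggering computation;
# 'repo' here is any repository object:
obj, cached = isfilecached(repo, b'changelog')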

class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)


class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())


def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""

    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)

    return wrapper


moderncaps = {
    b'lookup',
    b'branchmap',
    b'pushkey',
    b'known',
    b'getbundle',
    b'unbundle',
}
legacycaps = moderncaps.union({b'changegroupsubset'})


@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after sendcommands()'
            )

        if self._closed:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after close()'
            )

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True

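The executor is normally obtained from a peer's ``commandexecutor()`` and used as a context manager. A short usage sketch, assuming ``peer`` is an already-opened peer and using ``b'heads'`` purely as an example command name:

with peer.commandexecutor() as executor:
    fheads = executor.callcommand(b'heads', {})
    executor.sendcommands()
    heads = fheads.result()  # local futures resolve immediately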

@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered(b'served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread(b'clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def getbundle(
        self, source, heads=None, common=None, bundlecaps=None, **kwargs
    ):
        chunks = exchange.getbundlechunks(
            self._repo,
            source,
            heads=heads,
            common=common,
            bundlecaps=bundlecaps,
            **kwargs
        )[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler(b'01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_(b'cannot perform stream clone against local peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                if util.safehasattr(ret, b'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(
                _(b'push failed:'), stringutil.forcebytestr(exc)
            )

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.


@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=bases, ancestorsof=heads
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    # End of baselegacywirecommands interface.


# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = b'sparserevlog'

# A repository with the sidedataflag requirement will allow storing extra
# information for revisions without altering their original hashes.
SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'

# A repository with the copies-sidedata-changeset requirement will store
# copies-related information in the changeset's sidedata.
COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'

# The repository uses the persistent nodemap for the changelog and the manifest.
NODEMAP_REQUIREMENT = b'persistent-nodemap'

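These requirements are normally added at repository creation time when the corresponding format options are enabled. A hedged hgrc sketch, using option names from this era of Mercurial (the persistent nodemap was still an experimental feature at this point, so exact knob names may vary by release):

[format]
sparse-revlog = yes
use-persistent-nodemap = yes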
# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()

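An extension typically registers such a function from its ``uisetup``. A hedged sketch (the requirement name is hypothetical; the registration pattern is the one this module expects):

def featuresetup(ui, features):
    # advertise that this extension can open repos with this requirement
    features.add(b'exp-myextension-feature')


def uisetup(ui):
    localrepo.featuresetupfuncs.add(featuresetup)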

def _getsharedvfs(hgvfs, requirements):
    """returns the vfs object pointing to the root of the shared source
    repo for a shared repository

    hgvfs is the vfs pointing at .hg/ of the current repo (the shared one)
    requirements is a set of requirements of the current repo (the shared one)
    """
    # The ``shared`` or ``relshared`` requirements indicate the
    # store lives in the path contained in the ``.hg/sharedpath`` file.
    # This is an absolute path for ``shared`` and relative to
    # ``.hg/`` for ``relshared``.
    sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
    if b'relshared' in requirements:
        sharedpath = hgvfs.join(sharedpath)

    sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

    if not sharedvfs.exists():
        raise error.RepoError(
            _(b'.hg/sharedpath points to nonexistent directory %s')
            % sharedvfs.base
        )
    return sharedvfs


def _readrequires(vfs, allowmissing):
    """reads the requires file present at the root of this vfs
    and returns a set of requirements

    If allowmissing is True, we suppress ENOENT if raised"""
    # requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(vfs.read(b'requires').splitlines())
    except IOError as e:
        if not (allowmissing and e.errno == errno.ENOENT):
            raise
        requirements = set()
    return requirements

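For reference, ``.hg/requires`` holds one requirement name per line. A typical modern repository's file looks roughly like the following; the exact set depends on the options the repository was created with:

dotencode
fncache
generaldelta
revlogv1
sparserevlog
store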

def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
    # Whether this repository is a shared one or not
    shared = False
    # If this repository is shared, vfs pointing to the shared repo
    sharedvfs = None

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        except ValueError as e:
            # Can be raised on Python 3.8 when path is invalid.
            raise error.Abort(
                _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
            )

        raise error.RepoError(_(b'repository %s not found') % path)

    requirements = _readrequires(hgvfs, True)

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. If `shared` or
    # `relshared` requirements are present, this indicates the current
    # repository is a share and the store exists in the path mentioned in
    # `.hg/sharedpath`
    shared = b'shared' in requirements or b'relshared' in requirements
    if shared:
        sharedvfs = _getsharedvfs(hgvfs, requirements)
        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
        features.add(repository.REPO_FEATURE_SHARED_STORAGE)
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(
        requirements,
        storebasepath,
        lambda base: vfsmod.vfs(base, cacheaudited=True),
    )
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The cache vfs is used to manage cache files related to the working copy
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(
            ui=ui,
            intents=intents,
            requirements=requirements,
            features=features,
            wdirvfs=wdirvfs,
            hgvfs=hgvfs,
            store=store,
            storevfs=storevfs,
            storeoptions=storevfs.options,
            cachevfs=cachevfs,
            wcachevfs=wcachevfs,
            extensionmodulenames=extensionmodulenames,
            extrastate=extrastate,
            baseclasses=bases,
        )

        if not isinstance(typ, type):
            raise error.ProgrammingError(
                b'unable to construct type for %s' % iface
            )

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(
        b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
    )

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents,
    )

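As the docstring above notes, extensions customize construction by wrapping the ``REPO_INTERFACES`` factory functions. A hedged sketch using ``extensions.wrapfunction``; ``makefilestorage`` is one of the factories in this module, while the mixin class and requirement name are hypothetical:

def wrapfilestorage(orig, requirements, features, **kwargs):
    cls = orig(requirements=requirements, features=features, **kwargs)
    if b'exp-myextension-feature' in requirements:
        class mystorage(cls):
            pass  # override file storage methods here
        return mystorage
    return cls


extensions.wrapfunction(localrepo, 'makefilestorage', wrapfilestorage)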

def loadhgrc(ui, wdirvfs, hgvfs, requirements):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.
    """
    if not rcutil.use_repo_hgrc():
        return False
    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        return True
    except IOError:
        return False

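A hedged sketch of the monkeypatching the docstring describes, layering in an extra per-repo config file (the ``hgrc-extra`` filename is hypothetical):

def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements):
    loaded = orig(ui, wdirvfs, hgvfs, requirements)
    try:
        # 'hgrc-extra' is a made-up file name for this sketch
        ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
        return True
    except IOError:
        return loaded


extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)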
723
723
724 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
724 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
725 """Perform additional actions after .hg/hgrc is loaded.
725 """Perform additional actions after .hg/hgrc is loaded.
726
726
727 This function is called during repository loading immediately after
727 This function is called during repository loading immediately after
728 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
728 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
729
729
730 The function can be used to validate configs, automatically add
730 The function can be used to validate configs, automatically add
731 options (including extensions) based on requirements, etc.
731 options (including extensions) based on requirements, etc.
732 """
732 """
733
733
734 # Map of requirements to list of extensions to load automatically when
734 # Map of requirements to list of extensions to load automatically when
735 # requirement is present.
735 # requirement is present.
736 autoextensions = {
736 autoextensions = {
737 b'git': [b'git'],
737 b'git': [b'git'],
738 b'largefiles': [b'largefiles'],
738 b'largefiles': [b'largefiles'],
739 b'lfs': [b'lfs'],
739 b'lfs': [b'lfs'],
740 }
740 }
741
741
742 for requirement, names in sorted(autoextensions.items()):
742 for requirement, names in sorted(autoextensions.items()):
743 if requirement not in requirements:
743 if requirement not in requirements:
744 continue
744 continue
745
745
746 for name in names:
746 for name in names:
747 if not ui.hasconfig(b'extensions', name):
747 if not ui.hasconfig(b'extensions', name):
748 ui.setconfig(b'extensions', name, b'', source=b'autoload')
748 ui.setconfig(b'extensions', name, b'', source=b'autoload')
749
749
750
750
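# Editor's note: the loop above is equivalent to injecting, for a repo
# whose .hg/requires contains e.g. b'lfs', the configuration
#
#     [extensions]
#     lfs =
#
# with source b'autoload', unless the user already has an explicit
# [extensions] entry for that name.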
751 def gathersupportedrequirements(ui):
751 def gathersupportedrequirements(ui):
752 """Determine the complete set of recognized requirements."""
752 """Determine the complete set of recognized requirements."""
753 # Start with all requirements supported by this file.
753 # Start with all requirements supported by this file.
754 supported = set(localrepository._basesupported)
754 supported = set(localrepository._basesupported)
755
755
756 # Execute ``featuresetupfuncs`` entries if they belong to an extension
756 # Execute ``featuresetupfuncs`` entries if they belong to an extension
757 # relevant to this ui instance.
757 # relevant to this ui instance.
758 modules = {m.__name__ for n, m in extensions.extensions(ui)}
758 modules = {m.__name__ for n, m in extensions.extensions(ui)}
759
759
760 for fn in featuresetupfuncs:
760 for fn in featuresetupfuncs:
761 if fn.__module__ in modules:
761 if fn.__module__ in modules:
762 fn(ui, supported)
762 fn(ui, supported)
763
763
764 # Add derived requirements from registered compression engines.
764 # Add derived requirements from registered compression engines.
765 for name in util.compengines:
765 for name in util.compengines:
766 engine = util.compengines[name]
766 engine = util.compengines[name]
767 if engine.available() and engine.revlogheader():
767 if engine.available() and engine.revlogheader():
768 supported.add(b'exp-compression-%s' % name)
768 supported.add(b'exp-compression-%s' % name)
769 if engine.name() == b'zstd':
769 if engine.name() == b'zstd':
770 supported.add(b'revlog-compression-zstd')
770 supported.add(b'revlog-compression-zstd')
771
771
772 return supported
772 return supported
773
773
774
774
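# Editor's note -- sketch of how an extension typically registers a
# ``featuresetupfuncs`` entry so its requirement survives the check in
# ``ensurerequirementsrecognized`` (the b'exp-myfeature' name is
# hypothetical):
#
#     from mercurial import localrepo
#
#     def featuresetup(ui, supported):
#         supported |= {b'exp-myfeature'}
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)
#
# Only functions whose defining module belongs to a loaded extension are
# executed, which is what the ``fn.__module__ in modules`` test enforces.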
775 def ensurerequirementsrecognized(requirements, supported):
775 def ensurerequirementsrecognized(requirements, supported):
776 """Validate that a set of local requirements is recognized.
776 """Validate that a set of local requirements is recognized.
777
777
778 Receives a set of requirements. Raises an ``error.RepoError`` if there
778 Receives a set of requirements. Raises an ``error.RepoError`` if there
779 exists any requirement in that set that currently loaded code doesn't
779 exists any requirement in that set that currently loaded code doesn't
780 recognize.
780 recognize.
781
781
782 Returns ``None``; all validation failures raise.
782 Returns ``None``; all validation failures raise.
783 """
783 """
784 missing = set()
784 missing = set()
785
785
786 for requirement in requirements:
786 for requirement in requirements:
787 if requirement in supported:
787 if requirement in supported:
788 continue
788 continue
789
789
790 if not requirement or not requirement[0:1].isalnum():
790 if not requirement or not requirement[0:1].isalnum():
791 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
791 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
792
792
793 missing.add(requirement)
793 missing.add(requirement)
794
794
795 if missing:
795 if missing:
796 raise error.RequirementError(
796 raise error.RequirementError(
797 _(b'repository requires features unknown to this Mercurial: %s')
797 _(b'repository requires features unknown to this Mercurial: %s')
798 % b' '.join(sorted(missing)),
798 % b' '.join(sorted(missing)),
799 hint=_(
799 hint=_(
800 b'see https://mercurial-scm.org/wiki/MissingRequirement '
800 b'see https://mercurial-scm.org/wiki/MissingRequirement '
801 b'for more information'
801 b'for more information'
802 ),
802 ),
803 )
803 )
804
804
805
805
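# Editor's note: the ``requirement[0:1]`` slice above is deliberate --
# on Python 3, indexing bytes yields an int, while slicing yields a
# one-byte bytes object that still has ``.isalnum()``:
#
#     b'store'[0]       # 115 (an int, which has no .isalnum())
#     b'store'[0:1]     # b's', and b's'.isalnum() -> True
#
# so corrupt entries (empty lines, leading punctuation) are reported as
# a corrupt requires file rather than as merely missing requirements.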
806 def ensurerequirementscompatible(ui, requirements):
806 def ensurerequirementscompatible(ui, requirements):
807 """Validates that a set of recognized requirements is mutually compatible.
807 """Validates that a set of recognized requirements is mutually compatible.
808
808
809 Some requirements may not be compatible with others or require
809 Some requirements may not be compatible with others or require
810 config options that aren't enabled. This function is called during
810 config options that aren't enabled. This function is called during
811 repository opening to ensure that the set of requirements needed
811 repository opening to ensure that the set of requirements needed
812 to open a repository is sane and compatible with config options.
812 to open a repository is sane and compatible with config options.
813
813
814 Extensions can monkeypatch this function to perform additional
814 Extensions can monkeypatch this function to perform additional
815 checking.
815 checking.
816
816
817 ``error.RepoError`` should be raised on failure.
817 ``error.RepoError`` should be raised on failure.
818 """
818 """
819 if b'exp-sparse' in requirements and not sparse.enabled:
819 if repository.SPARSE_REQUIREMENT in requirements and not sparse.enabled:
820 raise error.RepoError(
820 raise error.RepoError(
821 _(
821 _(
822 b'repository is using sparse feature but '
822 b'repository is using sparse feature but '
823 b'sparse is not enabled; enable the '
823 b'sparse is not enabled; enable the '
824 b'"sparse" extensions to access'
824 b'"sparse" extensions to access'
825 )
825 )
826 )
826 )
827
827
828
828
829 def makestore(requirements, path, vfstype):
829 def makestore(requirements, path, vfstype):
830 """Construct a storage object for a repository."""
830 """Construct a storage object for a repository."""
831 if b'store' in requirements:
831 if b'store' in requirements:
832 if b'fncache' in requirements:
832 if b'fncache' in requirements:
833 return storemod.fncachestore(
833 return storemod.fncachestore(
834 path, vfstype, b'dotencode' in requirements
834 path, vfstype, b'dotencode' in requirements
835 )
835 )
836
836
837 return storemod.encodedstore(path, vfstype)
837 return storemod.encodedstore(path, vfstype)
838
838
839 return storemod.basicstore(path, vfstype)
839 return storemod.basicstore(path, vfstype)
840
840
841
841
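# Editor's note -- the dispatch above, summarized (requirements on the
# left, store class returned on the right):
#
#     {}                                    -> basicstore
#     {b'store'}                            -> encodedstore
#     {b'store', b'fncache'}                -> fncachestore (dotencode off)
#     {b'store', b'fncache', b'dotencode'}  -> fncachestore (dotencode on)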
842 def resolvestorevfsoptions(ui, requirements, features):
842 def resolvestorevfsoptions(ui, requirements, features):
843 """Resolve the options to pass to the store vfs opener.
843 """Resolve the options to pass to the store vfs opener.
844
844
845 The returned dict is used to influence behavior of the storage layer.
845 The returned dict is used to influence behavior of the storage layer.
846 """
846 """
847 options = {}
847 options = {}
848
848
849 if b'treemanifest' in requirements:
849 if b'treemanifest' in requirements:
850 options[b'treemanifest'] = True
850 options[b'treemanifest'] = True
851
851
852 # experimental config: format.manifestcachesize
852 # experimental config: format.manifestcachesize
853 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
853 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
854 if manifestcachesize is not None:
854 if manifestcachesize is not None:
855 options[b'manifestcachesize'] = manifestcachesize
855 options[b'manifestcachesize'] = manifestcachesize
856
856
857 # In the absence of another requirement superseding a revlog-related
857 # In the absence of another requirement superseding a revlog-related
858 # requirement, we have to assume the repo is using revlog version 0.
858 # requirement, we have to assume the repo is using revlog version 0.
859 # This revlog format is super old and we don't bother trying to parse
859 # This revlog format is super old and we don't bother trying to parse
860 # opener options for it because those options wouldn't do anything
860 # opener options for it because those options wouldn't do anything
861 # meaningful on such old repos.
861 # meaningful on such old repos.
862 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
862 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
863 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
863 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
864 else: # explicitly mark repo as using revlogv0
864 else: # explicitly mark repo as using revlogv0
865 options[b'revlogv0'] = True
865 options[b'revlogv0'] = True
866
866
867 if COPIESSDC_REQUIREMENT in requirements:
867 if COPIESSDC_REQUIREMENT in requirements:
868 options[b'copies-storage'] = b'changeset-sidedata'
868 options[b'copies-storage'] = b'changeset-sidedata'
869 else:
869 else:
870 writecopiesto = ui.config(b'experimental', b'copies.write-to')
870 writecopiesto = ui.config(b'experimental', b'copies.write-to')
871 copiesextramode = (b'changeset-only', b'compatibility')
871 copiesextramode = (b'changeset-only', b'compatibility')
872 if writecopiesto in copiesextramode:
872 if writecopiesto in copiesextramode:
873 options[b'copies-storage'] = b'extra'
873 options[b'copies-storage'] = b'extra'
874
874
875 return options
875 return options
876
876
877
877
878 def resolverevlogstorevfsoptions(ui, requirements, features):
878 def resolverevlogstorevfsoptions(ui, requirements, features):
879 """Resolve opener options specific to revlogs."""
879 """Resolve opener options specific to revlogs."""
880
880
881 options = {}
881 options = {}
882 options[b'flagprocessors'] = {}
882 options[b'flagprocessors'] = {}
883
883
884 if b'revlogv1' in requirements:
884 if b'revlogv1' in requirements:
885 options[b'revlogv1'] = True
885 options[b'revlogv1'] = True
886 if REVLOGV2_REQUIREMENT in requirements:
886 if REVLOGV2_REQUIREMENT in requirements:
887 options[b'revlogv2'] = True
887 options[b'revlogv2'] = True
888
888
889 if b'generaldelta' in requirements:
889 if b'generaldelta' in requirements:
890 options[b'generaldelta'] = True
890 options[b'generaldelta'] = True
891
891
892 # experimental config: format.chunkcachesize
892 # experimental config: format.chunkcachesize
893 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
893 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
894 if chunkcachesize is not None:
894 if chunkcachesize is not None:
895 options[b'chunkcachesize'] = chunkcachesize
895 options[b'chunkcachesize'] = chunkcachesize
896
896
897 deltabothparents = ui.configbool(
897 deltabothparents = ui.configbool(
898 b'storage', b'revlog.optimize-delta-parent-choice'
898 b'storage', b'revlog.optimize-delta-parent-choice'
899 )
899 )
900 options[b'deltabothparents'] = deltabothparents
900 options[b'deltabothparents'] = deltabothparents
901
901
902 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
902 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
903 lazydeltabase = False
903 lazydeltabase = False
904 if lazydelta:
904 if lazydelta:
905 lazydeltabase = ui.configbool(
905 lazydeltabase = ui.configbool(
906 b'storage', b'revlog.reuse-external-delta-parent'
906 b'storage', b'revlog.reuse-external-delta-parent'
907 )
907 )
908 if lazydeltabase is None:
908 if lazydeltabase is None:
909 lazydeltabase = not scmutil.gddeltaconfig(ui)
909 lazydeltabase = not scmutil.gddeltaconfig(ui)
910 options[b'lazydelta'] = lazydelta
910 options[b'lazydelta'] = lazydelta
911 options[b'lazydeltabase'] = lazydeltabase
911 options[b'lazydeltabase'] = lazydeltabase
912
912
913 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
913 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
914 if 0 <= chainspan:
914 if 0 <= chainspan:
915 options[b'maxdeltachainspan'] = chainspan
915 options[b'maxdeltachainspan'] = chainspan
916
916
917 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
917 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
918 if mmapindexthreshold is not None:
918 if mmapindexthreshold is not None:
919 options[b'mmapindexthreshold'] = mmapindexthreshold
919 options[b'mmapindexthreshold'] = mmapindexthreshold
920
920
921 withsparseread = ui.configbool(b'experimental', b'sparse-read')
921 withsparseread = ui.configbool(b'experimental', b'sparse-read')
922 srdensitythres = float(
922 srdensitythres = float(
923 ui.config(b'experimental', b'sparse-read.density-threshold')
923 ui.config(b'experimental', b'sparse-read.density-threshold')
924 )
924 )
925 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
925 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
926 options[b'with-sparse-read'] = withsparseread
926 options[b'with-sparse-read'] = withsparseread
927 options[b'sparse-read-density-threshold'] = srdensitythres
927 options[b'sparse-read-density-threshold'] = srdensitythres
928 options[b'sparse-read-min-gap-size'] = srmingapsize
928 options[b'sparse-read-min-gap-size'] = srmingapsize
929
929
930 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
930 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
931 options[b'sparse-revlog'] = sparserevlog
931 options[b'sparse-revlog'] = sparserevlog
932 if sparserevlog:
932 if sparserevlog:
933 options[b'generaldelta'] = True
933 options[b'generaldelta'] = True
934
934
935 sidedata = SIDEDATA_REQUIREMENT in requirements
935 sidedata = SIDEDATA_REQUIREMENT in requirements
936 options[b'side-data'] = sidedata
936 options[b'side-data'] = sidedata
937
937
938 maxchainlen = None
938 maxchainlen = None
939 if sparserevlog:
939 if sparserevlog:
940 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
940 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
941 # experimental config: format.maxchainlen
941 # experimental config: format.maxchainlen
942 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
942 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
943 if maxchainlen is not None:
943 if maxchainlen is not None:
944 options[b'maxchainlen'] = maxchainlen
944 options[b'maxchainlen'] = maxchainlen
945
945
946 for r in requirements:
946 for r in requirements:
947 # we allow multiple compression engine requirements to co-exist because,
947 # we allow multiple compression engine requirements to co-exist because,
948 # strictly speaking, revlog seems to support mixed compression styles.
948 # strictly speaking, revlog seems to support mixed compression styles.
949 #
949 #
950 # The compression used for new entries will be "the last one"
950 # The compression used for new entries will be "the last one"
951 prefix = r.startswith
951 prefix = r.startswith
952 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
952 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
953 options[b'compengine'] = r.split(b'-', 2)[2]
953 options[b'compengine'] = r.split(b'-', 2)[2]
954
954
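# Editor's note: the requirement name encodes the engine after the
# second b'-', and maxsplit=2 keeps engine names that themselves contain
# dashes intact:
#
#     b'exp-compression-zstd'.split(b'-', 2)[2]     -> b'zstd'
#     b'revlog-compression-zstd'.split(b'-', 2)[2]  -> b'zstd'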
955 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
955 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
956 if options[b'zlib.level'] is not None:
956 if options[b'zlib.level'] is not None:
957 if not (0 <= options[b'zlib.level'] <= 9):
957 if not (0 <= options[b'zlib.level'] <= 9):
958 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
958 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
959 raise error.Abort(msg % options[b'zlib.level'])
959 raise error.Abort(msg % options[b'zlib.level'])
960 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
960 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
961 if options[b'zstd.level'] is not None:
961 if options[b'zstd.level'] is not None:
962 if not (0 <= options[b'zstd.level'] <= 22):
962 if not (0 <= options[b'zstd.level'] <= 22):
963 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
963 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
964 raise error.Abort(msg % options[b'zstd.level'])
964 raise error.Abort(msg % options[b'zstd.level'])
965
965
966 if repository.NARROW_REQUIREMENT in requirements:
966 if repository.NARROW_REQUIREMENT in requirements:
967 options[b'enableellipsis'] = True
967 options[b'enableellipsis'] = True
968
968
969 if ui.configbool(b'experimental', b'rust.index'):
969 if ui.configbool(b'experimental', b'rust.index'):
970 options[b'rust.index'] = True
970 options[b'rust.index'] = True
971 if NODEMAP_REQUIREMENT in requirements:
971 if NODEMAP_REQUIREMENT in requirements:
972 options[b'persistent-nodemap'] = True
972 options[b'persistent-nodemap'] = True
973 if ui.configbool(b'storage', b'revlog.nodemap.mmap'):
973 if ui.configbool(b'storage', b'revlog.nodemap.mmap'):
974 options[b'persistent-nodemap.mmap'] = True
974 options[b'persistent-nodemap.mmap'] = True
975 epnm = ui.config(b'storage', b'revlog.nodemap.mode')
975 epnm = ui.config(b'storage', b'revlog.nodemap.mode')
976 options[b'persistent-nodemap.mode'] = epnm
976 options[b'persistent-nodemap.mode'] = epnm
977 if ui.configbool(b'devel', b'persistent-nodemap'):
977 if ui.configbool(b'devel', b'persistent-nodemap'):
978 options[b'devel-force-nodemap'] = True
978 options[b'devel-force-nodemap'] = True
979
979
980 return options
980 return options
981
981
982
982
983 def makemain(**kwargs):
983 def makemain(**kwargs):
984 """Produce a type conforming to ``ilocalrepositorymain``."""
984 """Produce a type conforming to ``ilocalrepositorymain``."""
985 return localrepository
985 return localrepository
986
986
987
987
988 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
988 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
989 class revlogfilestorage(object):
989 class revlogfilestorage(object):
990 """File storage when using revlogs."""
990 """File storage when using revlogs."""
991
991
992 def file(self, path):
992 def file(self, path):
993 if path[0] == b'/':
993 if path[0] == b'/':
994 path = path[1:]
994 path = path[1:]
995
995
996 return filelog.filelog(self.svfs, path)
996 return filelog.filelog(self.svfs, path)
997
997
998
998
999 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
999 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1000 class revlognarrowfilestorage(object):
1000 class revlognarrowfilestorage(object):
1001 """File storage when using revlogs and narrow files."""
1001 """File storage when using revlogs and narrow files."""
1002
1002
1003 def file(self, path):
1003 def file(self, path):
1004 if path[0] == b'/':
1004 if path[0] == b'/':
1005 path = path[1:]
1005 path = path[1:]
1006
1006
1007 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
1007 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
1008
1008
1009
1009
1010 def makefilestorage(requirements, features, **kwargs):
1010 def makefilestorage(requirements, features, **kwargs):
1011 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1011 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1012 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1012 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1013 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1013 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1014
1014
1015 if repository.NARROW_REQUIREMENT in requirements:
1015 if repository.NARROW_REQUIREMENT in requirements:
1016 return revlognarrowfilestorage
1016 return revlognarrowfilestorage
1017 else:
1017 else:
1018 return revlogfilestorage
1018 return revlogfilestorage
1019
1019
1020
1020
1021 # List of repository interfaces and factory functions for them. Each
1021 # List of repository interfaces and factory functions for them. Each
1022 # will be called in order during ``makelocalrepository()`` to iteratively
1022 # will be called in order during ``makelocalrepository()`` to iteratively
1023 # derive the final type for a local repository instance. We capture the
1023 # derive the final type for a local repository instance. We capture the
1024 # function as a lambda so we don't hold a reference and the module-level
1024 # function as a lambda so we don't hold a reference and the module-level
1025 # functions can be wrapped.
1025 # functions can be wrapped.
1026 REPO_INTERFACES = [
1026 REPO_INTERFACES = [
1027 (repository.ilocalrepositorymain, lambda: makemain),
1027 (repository.ilocalrepositorymain, lambda: makemain),
1028 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1028 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1029 ]
1029 ]
1030
1030
1031
1031
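# Editor's note -- sketch (not part of this changeset) of why the
# factories are captured lazily: an extension can rebind the
# module-level function and the lambda picks the wrapper up at call
# time. The subclass body here is hypothetical:
#
#     from mercurial import extensions, localrepo
#
#     def makefilestorage(orig, requirements, features, **kwargs):
#         base = orig(requirements, features, **kwargs)
#         class extendedfilestorage(base):
#             pass  # override file() here
#         return extendedfilestorage
#
#     def uisetup(ui):
#         extensions.wrapfunction(
#             localrepo, 'makefilestorage', makefilestorage)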
1032 @interfaceutil.implementer(repository.ilocalrepositorymain)
1032 @interfaceutil.implementer(repository.ilocalrepositorymain)
1033 class localrepository(object):
1033 class localrepository(object):
1034 """Main class for representing local repositories.
1034 """Main class for representing local repositories.
1035
1035
1036 All local repositories are instances of this class.
1036 All local repositories are instances of this class.
1037
1037
1038 Constructed on its own, instances of this class are not usable as
1038 Constructed on its own, instances of this class are not usable as
1039 repository objects. To obtain a usable repository object, call
1039 repository objects. To obtain a usable repository object, call
1040 ``hg.repository()``, ``localrepo.instance()``, or
1040 ``hg.repository()``, ``localrepo.instance()``, or
1041 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1041 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1042 ``instance()`` adds support for creating new repositories.
1042 ``instance()`` adds support for creating new repositories.
1043 ``hg.repository()`` adds more extension integration, including calling
1043 ``hg.repository()`` adds more extension integration, including calling
1044 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1044 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1045 used.
1045 used.
1046 """
1046 """
1047
1047
1048 # obsolete experimental requirements:
1048 # obsolete experimental requirements:
1049 # - manifestv2: An experimental new manifest format that allowed
1049 # - manifestv2: An experimental new manifest format that allowed
1050 # for stem compression of long paths. Experiment ended up not
1050 # for stem compression of long paths. Experiment ended up not
1051 # being successful (repository sizes went up due to worse delta
1051 # being successful (repository sizes went up due to worse delta
1052 # chains), and the code was deleted in 4.6.
1052 # chains), and the code was deleted in 4.6.
1053 supportedformats = {
1053 supportedformats = {
1054 b'revlogv1',
1054 b'revlogv1',
1055 b'generaldelta',
1055 b'generaldelta',
1056 b'treemanifest',
1056 b'treemanifest',
1057 COPIESSDC_REQUIREMENT,
1057 COPIESSDC_REQUIREMENT,
1058 REVLOGV2_REQUIREMENT,
1058 REVLOGV2_REQUIREMENT,
1059 SIDEDATA_REQUIREMENT,
1059 SIDEDATA_REQUIREMENT,
1060 SPARSEREVLOG_REQUIREMENT,
1060 SPARSEREVLOG_REQUIREMENT,
1061 NODEMAP_REQUIREMENT,
1061 NODEMAP_REQUIREMENT,
1062 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
1062 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
1063 }
1063 }
1064 _basesupported = supportedformats | {
1064 _basesupported = supportedformats | {
1065 b'store',
1065 b'store',
1066 b'fncache',
1066 b'fncache',
1067 b'shared',
1067 b'shared',
1068 b'relshared',
1068 b'relshared',
1069 b'dotencode',
1069 b'dotencode',
1070 b'exp-sparse',
1070 repository.SPARSE_REQUIREMENT,
1071 b'internal-phase',
1071 b'internal-phase',
1072 }
1072 }
1073
1073
1074 # list of prefixes for files which can be written without 'wlock'
1074 # list of prefixes for files which can be written without 'wlock'
1075 # Extensions should extend this list when needed
1075 # Extensions should extend this list when needed
1076 _wlockfreeprefix = {
1076 _wlockfreeprefix = {
1077 # We might consider requiring 'wlock' for the next
1077 # We might consider requiring 'wlock' for the next
1078 # two, but pretty much all the existing code assumes
1078 # two, but pretty much all the existing code assumes
1079 # wlock is not needed so we keep them excluded for
1079 # wlock is not needed so we keep them excluded for
1080 # now.
1080 # now.
1081 b'hgrc',
1081 b'hgrc',
1082 b'requires',
1082 b'requires',
1083 # XXX cache is a complicated business; someone
1083 # XXX cache is a complicated business; someone
1084 # should investigate this in depth at some point
1084 # should investigate this in depth at some point
1085 b'cache/',
1085 b'cache/',
1086 # XXX shouldn't be dirstate covered by the wlock?
1086 # XXX shouldn't be dirstate covered by the wlock?
1087 b'dirstate',
1087 b'dirstate',
1088 # XXX bisect was still a bit too messy at the time
1088 # XXX bisect was still a bit too messy at the time
1089 # this changeset was introduced. Someone should fix
1089 # this changeset was introduced. Someone should fix
1090 # the remaining bit and drop this line
1090 # the remaining bit and drop this line
1091 b'bisect.state',
1091 b'bisect.state',
1092 }
1092 }
1093
1093
1094 def __init__(
1094 def __init__(
1095 self,
1095 self,
1096 baseui,
1096 baseui,
1097 ui,
1097 ui,
1098 origroot,
1098 origroot,
1099 wdirvfs,
1099 wdirvfs,
1100 hgvfs,
1100 hgvfs,
1101 requirements,
1101 requirements,
1102 supportedrequirements,
1102 supportedrequirements,
1103 sharedpath,
1103 sharedpath,
1104 store,
1104 store,
1105 cachevfs,
1105 cachevfs,
1106 wcachevfs,
1106 wcachevfs,
1107 features,
1107 features,
1108 intents=None,
1108 intents=None,
1109 ):
1109 ):
1110 """Create a new local repository instance.
1110 """Create a new local repository instance.
1111
1111
1112 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1112 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1113 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1113 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1114 object.
1114 object.
1115
1115
1116 Arguments:
1116 Arguments:
1117
1117
1118 baseui
1118 baseui
1119 ``ui.ui`` instance that the ``ui`` argument was based on.
1119 ``ui.ui`` instance that the ``ui`` argument was based on.
1120
1120
1121 ui
1121 ui
1122 ``ui.ui`` instance for use by the repository.
1122 ``ui.ui`` instance for use by the repository.
1123
1123
1124 origroot
1124 origroot
1125 ``bytes`` path to working directory root of this repository.
1125 ``bytes`` path to working directory root of this repository.
1126
1126
1127 wdirvfs
1127 wdirvfs
1128 ``vfs.vfs`` rooted at the working directory.
1128 ``vfs.vfs`` rooted at the working directory.
1129
1129
1130 hgvfs
1130 hgvfs
1131 ``vfs.vfs`` rooted at .hg/
1131 ``vfs.vfs`` rooted at .hg/
1132
1132
1133 requirements
1133 requirements
1134 ``set`` of bytestrings representing repository opening requirements.
1134 ``set`` of bytestrings representing repository opening requirements.
1135
1135
1136 supportedrequirements
1136 supportedrequirements
1137 ``set`` of bytestrings representing repository requirements that we
1137 ``set`` of bytestrings representing repository requirements that we
1138 know how to open. May be a superset of ``requirements``.
1138 know how to open. May be a superset of ``requirements``.
1139
1139
1140 sharedpath
1140 sharedpath
1141 ``bytes`` defining the path to the storage base directory. Points to a
1141 ``bytes`` defining the path to the storage base directory. Points to a
1142 ``.hg/`` directory somewhere.
1142 ``.hg/`` directory somewhere.
1143
1143
1144 store
1144 store
1145 ``store.basicstore`` (or derived) instance providing access to
1145 ``store.basicstore`` (or derived) instance providing access to
1146 versioned storage.
1146 versioned storage.
1147
1147
1148 cachevfs
1148 cachevfs
1149 ``vfs.vfs`` used for cache files.
1149 ``vfs.vfs`` used for cache files.
1150
1150
1151 wcachevfs
1151 wcachevfs
1152 ``vfs.vfs`` used for cache files related to the working copy.
1152 ``vfs.vfs`` used for cache files related to the working copy.
1153
1153
1154 features
1154 features
1155 ``set`` of bytestrings defining features/capabilities of this
1155 ``set`` of bytestrings defining features/capabilities of this
1156 instance.
1156 instance.
1157
1157
1158 intents
1158 intents
1159 ``set`` of system strings indicating what this repo will be used
1159 ``set`` of system strings indicating what this repo will be used
1160 for.
1160 for.
1161 """
1161 """
1162 self.baseui = baseui
1162 self.baseui = baseui
1163 self.ui = ui
1163 self.ui = ui
1164 self.origroot = origroot
1164 self.origroot = origroot
1165 # vfs rooted at working directory.
1165 # vfs rooted at working directory.
1166 self.wvfs = wdirvfs
1166 self.wvfs = wdirvfs
1167 self.root = wdirvfs.base
1167 self.root = wdirvfs.base
1168 # vfs rooted at .hg/. Used to access most non-store paths.
1168 # vfs rooted at .hg/. Used to access most non-store paths.
1169 self.vfs = hgvfs
1169 self.vfs = hgvfs
1170 self.path = hgvfs.base
1170 self.path = hgvfs.base
1171 self.requirements = requirements
1171 self.requirements = requirements
1172 self.supported = supportedrequirements
1172 self.supported = supportedrequirements
1173 self.sharedpath = sharedpath
1173 self.sharedpath = sharedpath
1174 self.store = store
1174 self.store = store
1175 self.cachevfs = cachevfs
1175 self.cachevfs = cachevfs
1176 self.wcachevfs = wcachevfs
1176 self.wcachevfs = wcachevfs
1177 self.features = features
1177 self.features = features
1178
1178
1179 self.filtername = None
1179 self.filtername = None
1180
1180
1181 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1181 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1182 b'devel', b'check-locks'
1182 b'devel', b'check-locks'
1183 ):
1183 ):
1184 self.vfs.audit = self._getvfsward(self.vfs.audit)
1184 self.vfs.audit = self._getvfsward(self.vfs.audit)
1185 # A list of callbacks to shape the phases if no data were found.
1185 # A list of callbacks to shape the phases if no data were found.
1186 # Callbacks are of the form: func(repo, roots) --> processed roots.
1186 # Callbacks are of the form: func(repo, roots) --> processed roots.
1187 # This list is to be filled by extensions during repo setup.
1187 # This list is to be filled by extensions during repo setup.
1188 self._phasedefaults = []
1188 self._phasedefaults = []
1189
1189
1190 color.setup(self.ui)
1190 color.setup(self.ui)
1191
1191
1192 self.spath = self.store.path
1192 self.spath = self.store.path
1193 self.svfs = self.store.vfs
1193 self.svfs = self.store.vfs
1194 self.sjoin = self.store.join
1194 self.sjoin = self.store.join
1195 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1195 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1196 b'devel', b'check-locks'
1196 b'devel', b'check-locks'
1197 ):
1197 ):
1198 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1198 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1199 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1199 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1200 else: # standard vfs
1200 else: # standard vfs
1201 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1201 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1202
1202
1203 self._dirstatevalidatewarned = False
1203 self._dirstatevalidatewarned = False
1204
1204
1205 self._branchcaches = branchmap.BranchMapCache()
1205 self._branchcaches = branchmap.BranchMapCache()
1206 self._revbranchcache = None
1206 self._revbranchcache = None
1207 self._filterpats = {}
1207 self._filterpats = {}
1208 self._datafilters = {}
1208 self._datafilters = {}
1209 self._transref = self._lockref = self._wlockref = None
1209 self._transref = self._lockref = self._wlockref = None
1210
1210
1211 # A cache for various files under .hg/ that tracks file changes,
1211 # A cache for various files under .hg/ that tracks file changes,
1212 # (used by the filecache decorator)
1212 # (used by the filecache decorator)
1213 #
1213 #
1214 # Maps a property name to its util.filecacheentry
1214 # Maps a property name to its util.filecacheentry
1215 self._filecache = {}
1215 self._filecache = {}
1216
1216
1217 # hold sets of revisions to be filtered
1217 # hold sets of revisions to be filtered
1218 # should be cleared when something might have changed the filter value:
1218 # should be cleared when something might have changed the filter value:
1219 # - new changesets,
1219 # - new changesets,
1220 # - phase change,
1220 # - phase change,
1221 # - new obsolescence marker,
1221 # - new obsolescence marker,
1222 # - working directory parent change,
1222 # - working directory parent change,
1223 # - bookmark changes
1223 # - bookmark changes
1224 self.filteredrevcache = {}
1224 self.filteredrevcache = {}
1225
1225
1226 # post-dirstate-status hooks
1226 # post-dirstate-status hooks
1227 self._postdsstatus = []
1227 self._postdsstatus = []
1228
1228
1229 # generic mapping between names and nodes
1229 # generic mapping between names and nodes
1230 self.names = namespaces.namespaces()
1230 self.names = namespaces.namespaces()
1231
1231
1232 # Key to signature value.
1232 # Key to signature value.
1233 self._sparsesignaturecache = {}
1233 self._sparsesignaturecache = {}
1234 # Signature to cached matcher instance.
1234 # Signature to cached matcher instance.
1235 self._sparsematchercache = {}
1235 self._sparsematchercache = {}
1236
1236
1237 self._extrafilterid = repoview.extrafilter(ui)
1237 self._extrafilterid = repoview.extrafilter(ui)
1238
1238
1239 self.filecopiesmode = None
1239 self.filecopiesmode = None
1240 if COPIESSDC_REQUIREMENT in self.requirements:
1240 if COPIESSDC_REQUIREMENT in self.requirements:
1241 self.filecopiesmode = b'changeset-sidedata'
1241 self.filecopiesmode = b'changeset-sidedata'
1242
1242
1243 def _getvfsward(self, origfunc):
1243 def _getvfsward(self, origfunc):
1244 """build a ward for self.vfs"""
1244 """build a ward for self.vfs"""
1245 rref = weakref.ref(self)
1245 rref = weakref.ref(self)
1246
1246
1247 def checkvfs(path, mode=None):
1247 def checkvfs(path, mode=None):
1248 ret = origfunc(path, mode=mode)
1248 ret = origfunc(path, mode=mode)
1249 repo = rref()
1249 repo = rref()
1250 if (
1250 if (
1251 repo is None
1251 repo is None
1252 or not util.safehasattr(repo, b'_wlockref')
1252 or not util.safehasattr(repo, b'_wlockref')
1253 or not util.safehasattr(repo, b'_lockref')
1253 or not util.safehasattr(repo, b'_lockref')
1254 ):
1254 ):
1255 return
1255 return
1256 if mode in (None, b'r', b'rb'):
1256 if mode in (None, b'r', b'rb'):
1257 return
1257 return
1258 if path.startswith(repo.path):
1258 if path.startswith(repo.path):
1259 # truncate name relative to the repository (.hg)
1259 # truncate name relative to the repository (.hg)
1260 path = path[len(repo.path) + 1 :]
1260 path = path[len(repo.path) + 1 :]
1261 if path.startswith(b'cache/'):
1261 if path.startswith(b'cache/'):
1262 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1262 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1263 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1263 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1264 # path prefixes covered by 'lock'
1264 # path prefixes covered by 'lock'
1265 vfs_path_prefixes = (b'journal.', b'undo.', b'strip-backup/')
1265 vfs_path_prefixes = (b'journal.', b'undo.', b'strip-backup/')
1266 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1266 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1267 if repo._currentlock(repo._lockref) is None:
1267 if repo._currentlock(repo._lockref) is None:
1268 repo.ui.develwarn(
1268 repo.ui.develwarn(
1269 b'write with no lock: "%s"' % path,
1269 b'write with no lock: "%s"' % path,
1270 stacklevel=3,
1270 stacklevel=3,
1271 config=b'check-locks',
1271 config=b'check-locks',
1272 )
1272 )
1273 elif repo._currentlock(repo._wlockref) is None:
1273 elif repo._currentlock(repo._wlockref) is None:
1274 # rest of vfs files are covered by 'wlock'
1274 # rest of vfs files are covered by 'wlock'
1275 #
1275 #
1276 # exclude special files
1276 # exclude special files
1277 for prefix in self._wlockfreeprefix:
1277 for prefix in self._wlockfreeprefix:
1278 if path.startswith(prefix):
1278 if path.startswith(prefix):
1279 return
1279 return
1280 repo.ui.develwarn(
1280 repo.ui.develwarn(
1281 b'write with no wlock: "%s"' % path,
1281 b'write with no wlock: "%s"' % path,
1282 stacklevel=3,
1282 stacklevel=3,
1283 config=b'check-locks',
1283 config=b'check-locks',
1284 )
1284 )
1285 return ret
1285 return ret
1286
1286
1287 return checkvfs
1287 return checkvfs
1288
1288
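# Editor's note: both wards hold only a weak reference to the repo so
# that wrapping ``vfs.audit`` does not create a cycle keeping the
# repository alive; the general shape of the pattern:
#
#     import weakref
#     rref = weakref.ref(repo)
#     ...
#     repo = rref()   # None once the repo has been collected
#     if repo is None:
#         return      # degrade to the unwrapped behaviour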
1289 def _getsvfsward(self, origfunc):
1289 def _getsvfsward(self, origfunc):
1290 """build a ward for self.svfs"""
1290 """build a ward for self.svfs"""
1291 rref = weakref.ref(self)
1291 rref = weakref.ref(self)
1292
1292
1293 def checksvfs(path, mode=None):
1293 def checksvfs(path, mode=None):
1294 ret = origfunc(path, mode=mode)
1294 ret = origfunc(path, mode=mode)
1295 repo = rref()
1295 repo = rref()
1296 if repo is None or not util.safehasattr(repo, b'_lockref'):
1296 if repo is None or not util.safehasattr(repo, b'_lockref'):
1297 return
1297 return
1298 if mode in (None, b'r', b'rb'):
1298 if mode in (None, b'r', b'rb'):
1299 return
1299 return
1300 if path.startswith(repo.sharedpath):
1300 if path.startswith(repo.sharedpath):
1301 # truncate name relative to the repository (.hg)
1301 # truncate name relative to the repository (.hg)
1302 path = path[len(repo.sharedpath) + 1 :]
1302 path = path[len(repo.sharedpath) + 1 :]
1303 if repo._currentlock(repo._lockref) is None:
1303 if repo._currentlock(repo._lockref) is None:
1304 repo.ui.develwarn(
1304 repo.ui.develwarn(
1305 b'write with no lock: "%s"' % path, stacklevel=4
1305 b'write with no lock: "%s"' % path, stacklevel=4
1306 )
1306 )
1307 return ret
1307 return ret
1308
1308
1309 return checksvfs
1309 return checksvfs
1310
1310
1311 def close(self):
1311 def close(self):
1312 self._writecaches()
1312 self._writecaches()
1313
1313
1314 def _writecaches(self):
1314 def _writecaches(self):
1315 if self._revbranchcache:
1315 if self._revbranchcache:
1316 self._revbranchcache.write()
1316 self._revbranchcache.write()
1317
1317
1318 def _restrictcapabilities(self, caps):
1318 def _restrictcapabilities(self, caps):
1319 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1319 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1320 caps = set(caps)
1320 caps = set(caps)
1321 capsblob = bundle2.encodecaps(
1321 capsblob = bundle2.encodecaps(
1322 bundle2.getrepocaps(self, role=b'client')
1322 bundle2.getrepocaps(self, role=b'client')
1323 )
1323 )
1324 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1324 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1325 return caps
1325 return caps
1326
1326
1327 # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
1327 # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
1328 # self -> auditor -> self._checknested -> self
1328 # self -> auditor -> self._checknested -> self
1329
1329
1330 @property
1330 @property
1331 def auditor(self):
1331 def auditor(self):
1332 # This is only used by context.workingctx.match in order to
1332 # This is only used by context.workingctx.match in order to
1333 # detect files in subrepos.
1333 # detect files in subrepos.
1334 return pathutil.pathauditor(self.root, callback=self._checknested)
1334 return pathutil.pathauditor(self.root, callback=self._checknested)
1335
1335
1336 @property
1336 @property
1337 def nofsauditor(self):
1337 def nofsauditor(self):
1338 # This is only used by context.basectx.match in order to detect
1338 # This is only used by context.basectx.match in order to detect
1339 # files in subrepos.
1339 # files in subrepos.
1340 return pathutil.pathauditor(
1340 return pathutil.pathauditor(
1341 self.root, callback=self._checknested, realfs=False, cached=True
1341 self.root, callback=self._checknested, realfs=False, cached=True
1342 )
1342 )
1343
1343
1344 def _checknested(self, path):
1344 def _checknested(self, path):
1345 """Determine if path is a legal nested repository."""
1345 """Determine if path is a legal nested repository."""
1346 if not path.startswith(self.root):
1346 if not path.startswith(self.root):
1347 return False
1347 return False
1348 subpath = path[len(self.root) + 1 :]
1348 subpath = path[len(self.root) + 1 :]
1349 normsubpath = util.pconvert(subpath)
1349 normsubpath = util.pconvert(subpath)
1350
1350
1351 # XXX: Checking against the current working copy is wrong in
1351 # XXX: Checking against the current working copy is wrong in
1352 # the sense that it can reject things like
1352 # the sense that it can reject things like
1353 #
1353 #
1354 # $ hg cat -r 10 sub/x.txt
1354 # $ hg cat -r 10 sub/x.txt
1355 #
1355 #
1356 # if sub/ is no longer a subrepository in the working copy
1356 # if sub/ is no longer a subrepository in the working copy
1357 # parent revision.
1357 # parent revision.
1358 #
1358 #
1359 # However, it can of course also allow things that would have
1359 # However, it can of course also allow things that would have
1360 # been rejected before, such as the above cat command if sub/
1360 # been rejected before, such as the above cat command if sub/
1361 # is a subrepository now, but was a normal directory before.
1361 # is a subrepository now, but was a normal directory before.
1362 # The old path auditor would have rejected by mistake since it
1362 # The old path auditor would have rejected by mistake since it
1363 # panics when it sees sub/.hg/.
1363 # panics when it sees sub/.hg/.
1364 #
1364 #
1365 # All in all, checking against the working copy seems sensible
1365 # All in all, checking against the working copy seems sensible
1366 # since we want to prevent access to nested repositories on
1366 # since we want to prevent access to nested repositories on
1367 # the filesystem *now*.
1367 # the filesystem *now*.
1368 ctx = self[None]
1368 ctx = self[None]
1369 parts = util.splitpath(subpath)
1369 parts = util.splitpath(subpath)
1370 while parts:
1370 while parts:
1371 prefix = b'/'.join(parts)
1371 prefix = b'/'.join(parts)
1372 if prefix in ctx.substate:
1372 if prefix in ctx.substate:
1373 if prefix == normsubpath:
1373 if prefix == normsubpath:
1374 return True
1374 return True
1375 else:
1375 else:
1376 sub = ctx.sub(prefix)
1376 sub = ctx.sub(prefix)
1377 return sub.checknested(subpath[len(prefix) + 1 :])
1377 return sub.checknested(subpath[len(prefix) + 1 :])
1378 else:
1378 else:
1379 parts.pop()
1379 parts.pop()
1380 return False
1380 return False
1381
1381
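# Editor's note: the loop above probes ever-shorter prefixes of the
# candidate path, e.g. for subpath b'sub/repo/file.txt' it checks, in
# order:
#
#     b'sub/repo/file.txt', b'sub/repo', b'sub'
#
# recursing into the first prefix found in ctx.substate and returning
# False only if no prefix matches.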
1382 def peer(self):
1382 def peer(self):
1383 return localpeer(self) # not cached to avoid reference cycle
1383 return localpeer(self) # not cached to avoid reference cycle
1384
1384
1385 def unfiltered(self):
1385 def unfiltered(self):
1386 """Return unfiltered version of the repository
1386 """Return unfiltered version of the repository
1387
1387
1388 Intended to be overwritten by filtered repo."""
1388 Intended to be overwritten by filtered repo."""
1389 return self
1389 return self
1390
1390
1391 def filtered(self, name, visibilityexceptions=None):
1391 def filtered(self, name, visibilityexceptions=None):
1392 """Return a filtered version of a repository
1392 """Return a filtered version of a repository
1393
1393
1394 The `name` parameter is the identifier of the requested view. This
1394 The `name` parameter is the identifier of the requested view. This
1395 will return a repoview object set "exactly" to the specified view.
1395 will return a repoview object set "exactly" to the specified view.
1396
1396
1397 This function does not apply recursive filtering to a repository. For
1397 This function does not apply recursive filtering to a repository. For
1398 example calling `repo.filtered("served")` will return a repoview using
1398 example calling `repo.filtered("served")` will return a repoview using
1399 the "served" view, regardless of the initial view used by `repo`.
1399 the "served" view, regardless of the initial view used by `repo`.
1400
1400
1401 In other words, there is always only one level of `repoview` "filtering".
1401 In other words, there is always only one level of `repoview` "filtering".
1402 """
1402 """
1403 if self._extrafilterid is not None and b'%' not in name:
1403 if self._extrafilterid is not None and b'%' not in name:
1404 name = name + b'%' + self._extrafilterid
1404 name = name + b'%' + self._extrafilterid
1405
1405
1406 cls = repoview.newtype(self.unfiltered().__class__)
1406 cls = repoview.newtype(self.unfiltered().__class__)
1407 return cls(self, name, visibilityexceptions)
1407 return cls(self, name, visibilityexceptions)
1408
1408
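# Editor's note: with experimental.extra-filter-revs set, the view name
# requested here is suffixed, so e.g. repo.filtered(b'served') actually
# instantiates a repoview for b'served%<filterid>'; the b'%' check above
# keeps already-suffixed names from being suffixed twice.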
1409 @mixedrepostorecache(
1409 @mixedrepostorecache(
1410 (b'bookmarks', b'plain'),
1410 (b'bookmarks', b'plain'),
1411 (b'bookmarks.current', b'plain'),
1411 (b'bookmarks.current', b'plain'),
1412 (b'bookmarks', b''),
1412 (b'bookmarks', b''),
1413 (b'00changelog.i', b''),
1413 (b'00changelog.i', b''),
1414 )
1414 )
1415 def _bookmarks(self):
1415 def _bookmarks(self):
1416 # Since the multiple files involved in the transaction cannot be
1416 # Since the multiple files involved in the transaction cannot be
1417 # written atomically (with current repository format), there is a race
1417 # written atomically (with current repository format), there is a race
1418 # condition here.
1418 # condition here.
1419 #
1419 #
1420 # 1) changelog content A is read
1420 # 1) changelog content A is read
1421 # 2) outside transaction update changelog to content B
1421 # 2) outside transaction update changelog to content B
1422 # 3) outside transaction update bookmark file referring to content B
1422 # 3) outside transaction update bookmark file referring to content B
1423 # 4) bookmarks file content is read and filtered against changelog-A
1423 # 4) bookmarks file content is read and filtered against changelog-A
1424 #
1424 #
1425 # When this happens, bookmarks against nodes missing from A are dropped.
1425 # When this happens, bookmarks against nodes missing from A are dropped.
1426 #
1426 #
1427 # Having this happen during a read is not great, but it becomes worse
1427 # Having this happen during a read is not great, but it becomes worse
1428 # when it happens during a write because the bookmarks to the "unknown"
1428 # when it happens during a write because the bookmarks to the "unknown"
1429 # nodes will be dropped for good. However, writes happen within locks.
1429 # nodes will be dropped for good. However, writes happen within locks.
1430 # This locking makes it possible to have a race free consistent read.
1430 # This locking makes it possible to have a race free consistent read.
1431 # For this purpose, data read from disk before locking are
1431 # For this purpose, data read from disk before locking are
1432 # "invalidated" right after the locks are taken. These invalidations are
1432 # "invalidated" right after the locks are taken. These invalidations are
1433 # "light": the `filecache` mechanism keeps the data in memory and will
1433 # "light": the `filecache` mechanism keeps the data in memory and will
1434 # reuse them if the underlying files did not change. Not parsing the
1434 # reuse them if the underlying files did not change. Not parsing the
1435 # same data multiple times helps performance.
1435 # same data multiple times helps performance.
1436 #
1436 #
1437 # Unfortunately, in the case described above, the files tracked by the
1437 # Unfortunately, in the case described above, the files tracked by the
1438 # bookmarks file cache might not have changed, but the in-memory
1438 # bookmarks file cache might not have changed, but the in-memory
1439 # content is still "wrong" because we used an older changelog content
1439 # content is still "wrong" because we used an older changelog content
1440 # to process the on-disk data. So after locking, the changelog would be
1440 # to process the on-disk data. So after locking, the changelog would be
1441 # refreshed but `_bookmarks` would be preserved.
1441 # refreshed but `_bookmarks` would be preserved.
1442 # Adding `00changelog.i` to the list of tracked files is not
1442 # Adding `00changelog.i` to the list of tracked files is not
1443 # enough, because at the time we build the content for `_bookmarks` in
1443 # enough, because at the time we build the content for `_bookmarks` in
1444 # (4), the changelog file has already diverged from the content used
1444 # (4), the changelog file has already diverged from the content used
1445 # for loading `changelog` in (1)
1445 # for loading `changelog` in (1)
1446 #
1446 #
1447 # To prevent the issue, we force the changelog to be explicitly
1447 # To prevent the issue, we force the changelog to be explicitly
1448 # reloaded while computing `_bookmarks`. The data race can still happen
1448 # reloaded while computing `_bookmarks`. The data race can still happen
1449 # without the lock (with a narrower window), but it would no longer go
1449 # without the lock (with a narrower window), but it would no longer go
1450 # undetected during the lock time refresh.
1450 # undetected during the lock time refresh.
1451 #
1451 #
1452 # The new schedule is as follows:
1452 # The new schedule is as follows:
1453 #
1453 #
1454 # 1) filecache logic detects that `_bookmarks` needs to be computed
1454 # 1) filecache logic detects that `_bookmarks` needs to be computed
1455 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1455 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1456 # 3) We force `changelog` filecache to be tested
1456 # 3) We force `changelog` filecache to be tested
1457 # 4) cachestat for `changelog` are captured (for changelog)
1457 # 4) cachestat for `changelog` are captured (for changelog)
1458 # 5) `_bookmarks` is computed and cached
1458 # 5) `_bookmarks` is computed and cached
1459 #
1459 #
1460 # The step in (3) ensures we have a changelog at least as recent as the
1460 # The step in (3) ensures we have a changelog at least as recent as the
1461 # cache stat computed in (1). As a result at locking time:
1461 # cache stat computed in (1). As a result at locking time:
1462 # * if the changelog did not change since (1) -> we can reuse the data
1462 # * if the changelog did not change since (1) -> we can reuse the data
1463 # * otherwise -> the bookmarks get refreshed.
1463 # * otherwise -> the bookmarks get refreshed.
1464 self._refreshchangelog()
1464 self._refreshchangelog()
1465 return bookmarks.bmstore(self)
1465 return bookmarks.bmstore(self)
1466
1466
1467 def _refreshchangelog(self):
1467 def _refreshchangelog(self):
1468 """make sure the in memory changelog match the on-disk one"""
1468 """make sure the in memory changelog match the on-disk one"""
1469 if 'changelog' in vars(self) and self.currenttransaction() is None:
1469 if 'changelog' in vars(self) and self.currenttransaction() is None:
1470 del self.changelog
1470 del self.changelog
1471
1471
1472 @property
1472 @property
1473 def _activebookmark(self):
1473 def _activebookmark(self):
1474 return self._bookmarks.active
1474 return self._bookmarks.active
1475
1475
1476 # _phasesets depend on changelog. what we need is to call
1476 # _phasesets depend on changelog. what we need is to call
1477 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1477 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1478 # can't be easily expressed in the filecache mechanism.
1478 # can't be easily expressed in the filecache mechanism.
1479 @storecache(b'phaseroots', b'00changelog.i')
1479 @storecache(b'phaseroots', b'00changelog.i')
1480 def _phasecache(self):
1480 def _phasecache(self):
1481 return phases.phasecache(self, self._phasedefaults)
1481 return phases.phasecache(self, self._phasedefaults)
1482
1482
1483 @storecache(b'obsstore')
1483 @storecache(b'obsstore')
1484 def obsstore(self):
1484 def obsstore(self):
1485 return obsolete.makestore(self.ui, self)
1485 return obsolete.makestore(self.ui, self)
1486
1486
1487 @storecache(b'00changelog.i')
1487 @storecache(b'00changelog.i')
1488 def changelog(self):
1488 def changelog(self):
1489 # load dirstate before changelog to avoid a race (see issue6303)
1489 # load dirstate before changelog to avoid a race (see issue6303)
1490 self.dirstate.prefetch_parents()
1490 self.dirstate.prefetch_parents()
1491 return self.store.changelog(txnutil.mayhavepending(self.root))
1491 return self.store.changelog(txnutil.mayhavepending(self.root))
1492
1492
1493 @storecache(b'00manifest.i')
1493 @storecache(b'00manifest.i')
1494 def manifestlog(self):
1494 def manifestlog(self):
1495 return self.store.manifestlog(self, self._storenarrowmatch)
1495 return self.store.manifestlog(self, self._storenarrowmatch)
1496
1496
1497 @repofilecache(b'dirstate')
1497 @repofilecache(b'dirstate')
1498 def dirstate(self):
1498 def dirstate(self):
1499 return self._makedirstate()
1499 return self._makedirstate()
1500
1500
1501 def _makedirstate(self):
1501 def _makedirstate(self):
1502 """Extension point for wrapping the dirstate per-repo."""
1502 """Extension point for wrapping the dirstate per-repo."""
1503 sparsematchfn = lambda: sparse.matcher(self)
1503 sparsematchfn = lambda: sparse.matcher(self)
1504
1504
1505 return dirstate.dirstate(
1505 return dirstate.dirstate(
1506 self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
1506 self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
1507 )
1507 )
1508
1508
    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch

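    # Sketch of typical calls into the matcher above (``usermatch`` is a
    # hypothetical matcher supplied by a caller):
    #
    #   repo.narrowmatch()           # the bare narrowspec matcher
    #   repo.narrowmatch(usermatch)  # usermatch intersected with it
    #   repo.narrowmatch(usermatch, includeexact=True)
    #                                # exact paths survive for warnings
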
    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, nullid),
            nullrev: (nullrev, nullid),
            nullid: (nullrev, nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast path access to the working copy parents
        # however, only do it for filters that ensure wc is visible.
        quick = {}
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                # skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != nullid:
            quick[b'.'] = quick[p1node]
        return quick

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains a list of symbols we can recognise right away without
        further processing.
        """
        mapping = self._quick_access_changeid_null
        if self.filtername in repoview.filter_has_wc:
            mapping = mapping.copy()
            mapping.update(self._quick_access_changeid_wc)
        return mapping

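    # Sketch of what the mapping above can contain for a repo whose
    # working copy parent is rev 5 with node ``p1n`` (values hypothetical):
    #
    #   {
    #       b'null': (-1, nullid), -1: (-1, nullid), nullid: (-1, nullid),
    #       5: (5, p1n), p1n: (5, p1n), b'.': (5, p1n),
    #       # ...plus entries for the parents of rev 5...
    #   }
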
    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in pycompat.xrange(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)

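    # Sketch of the changeid forms accepted above:
    #
    #   repo[None]     -> workingctx
    #   repo[0]        -> changectx for rev 0
    #   repo[b'.']     -> working copy parent
    #   repo[b'tip']   -> tip of the (possibly filtered) changelog
    #   repo[node]     -> lookup by 20-byte binary node
    #   repo[hexnode]  -> lookup by 40-byte hex node (hexnode hypothetical)
    #   repo[0:3]      -> list of changectx, skipping filtered revs
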
    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node is
        specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)

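    # Sketch of the %-formatting described above (see
    # ``revsetlang.formatspec`` for the full list of escapes; revisions
    # and branch name below are hypothetical):
    #
    #   repo.revs(b'%d::%d', 4, 7)           # %d escapes an int rev
    #   repo.revs(b'heads(%ld)', [1, 2])     # %ld escapes a list of revs
    #   repo.revs(b'branch(%s)', b'stable')  # %s escapes bytes
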
    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

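    # Sketch of the localalias override described above (the alias name
    # and its definition are hypothetical):
    #
    #   repo.anyrevs(
    #       [b'mine()'],
    #       user=True,
    #       localalias={b'mine': b'user("alice") and draft()'},
    #   )
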
    def url(self):
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in pycompat.iteritems(tags):
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in pycompat.iteritems(alltags):
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value
            for (name, value) in pycompat.iteritems(tagtypes)
        }
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in pycompat.iteritems(self.tags()):
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in pycompat.iteritems(self._tagscache.tags):
                nodetagscache.setdefault(n, []).append(t)
            for tags in pycompat.itervalues(nodetagscache):
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

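    # Sketch: known() maps a list of nodes to booleans, e.g. for one
    # known node and one made-up node (``knownnode`` hypothetical):
    #
    #   repo.known([knownnode, b'\x01' * 20])  ->  [True, False]
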
    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in pycompat.iteritems(self._datafilters):
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

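    # Sketch of the hgrc configuration these filters consume (the pattern
    # and command below are hypothetical; see 'hg help config' for the
    # real [encode]/[decode] filter syntax):
    #
    #   [encode]
    #   *.txt = pipe: somefilter --encode
    #
    #   [decode]
    #   *.txt = pipe: somefilter --decode
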
    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs}  # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since this likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current focus
        # is on the behavior of the feature so we disable it by default. The
        # flag will be removed when we are happy with the performance impact.
        #
        # Once this feature is no longer experimental move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
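        #
        # For instance, a transaction that moves tag 'v1.0' and adds tag
        # 'v2.0' could leave lines like these in tags.changes (the node
        # hashes below are hypothetical):
        #
        #   -M 3f0e5bcae1355b8f5bc3e0d6e85ff76b17564feb v1.0
        #   +M a1f51c7d35ba0c29f6a126668703e8eb4ea3a24b v1.0
        #   +A 8a25d3c2a0a1efa34e31b10b47d6ee4e26e9b15c v2.0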
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hooks since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args)
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args)
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, b'journal.narrowspec')
                narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, b'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            aftertrans(renames),
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        tr.hookargs[b'changes'] = tr.changes
        # note: writing the fncache only during finalize means that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happens during the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(phases.preparehookargs(node, old, new))
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args)
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            reporef()._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

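    # Sketch of the usual calling convention for transaction() (the
    # surrounding lock is mandatory, per the ProgrammingError raised
    # above; the description string is hypothetical):
    #
    #   with repo.lock():
    #       tr = repo.transaction(b'my-operation')
    #       try:
    #           # ...mutate the repository...
    #           tr.close()
    #       finally:
    #           tr.release()
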
2350 def _journalfiles(self):
2350 def _journalfiles(self):
2351 return (
2351 return (
2352 (self.svfs, b'journal'),
2352 (self.svfs, b'journal'),
2353 (self.svfs, b'journal.narrowspec'),
2353 (self.svfs, b'journal.narrowspec'),
2354 (self.vfs, b'journal.narrowspec.dirstate'),
2354 (self.vfs, b'journal.narrowspec.dirstate'),
2355 (self.vfs, b'journal.dirstate'),
2355 (self.vfs, b'journal.dirstate'),
2356 (self.vfs, b'journal.branch'),
2356 (self.vfs, b'journal.branch'),
2357 (self.vfs, b'journal.desc'),
2357 (self.vfs, b'journal.desc'),
2358 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2358 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2359 (self.svfs, b'journal.phaseroots'),
2359 (self.svfs, b'journal.phaseroots'),
2360 )
2360 )
2361
2361
2362 def undofiles(self):
2362 def undofiles(self):
2363 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2363 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2364
2364
2365 @unfilteredmethod
2365 @unfilteredmethod
2366 def _writejournal(self, desc):
2366 def _writejournal(self, desc):
2367 self.dirstate.savebackup(None, b'journal.dirstate')
2367 self.dirstate.savebackup(None, b'journal.dirstate')
2368 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2368 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2369 narrowspec.savebackup(self, b'journal.narrowspec')
2369 narrowspec.savebackup(self, b'journal.narrowspec')
2370 self.vfs.write(
2370 self.vfs.write(
2371 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2371 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2372 )
2372 )
2373 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2373 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2374 bookmarksvfs = bookmarks.bookmarksvfs(self)
2374 bookmarksvfs = bookmarks.bookmarksvfs(self)
2375 bookmarksvfs.write(
2375 bookmarksvfs.write(
2376 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2376 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2377 )
2377 )
2378 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2378 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2379
2379
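    # 'recover' undoes an *interrupted* transaction by replaying the journal;
    # contrast with 'rollback' below, which undoes the last *completed*
    # transaction using the 'undo.*' files produced by undofiles().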
    def recover(self):
        with self.lock():
            if self.svfs.exists(b"journal"):
                self.ui.status(_(b"rolling back interrupted transaction\n"))
                vfsmap = {
                    b'': self.svfs,
                    b'plain': self.vfs,
                }
                transaction.rollback(
                    self.svfs,
                    vfsmap,
                    b"journal",
                    self.ui.warn,
                    checkambigfiles=_cachedfiles,
                )
                self.invalidate()
                return True
            else:
                self.ui.warn(_(b"no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists(b"undo"):
                dsguard = dirstateguard.dirstateguard(self, b'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_(b"no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod  # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
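            # undo.desc is the renamed journal.desc written by _writejournal:
            # the first line is the previous changelog length, the second the
            # transaction description; a third line, when present, carries
            # extra detail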
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
        except IOError:
            msg = _(b'rolling back unknown transaction\n')
            desc = None

        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {b'plain': self.vfs, b'': self.svfs}
        transaction.rollback(
            self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
        )
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        if bookmarksvfs.exists(b'undo.bookmarks'):
            bookmarksvfs.rename(
                b'undo.bookmarks', b'bookmarks', checkambig=True
            )
        if self.svfs.exists(b'undo.phaseroots'):
            self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
        self.invalidate()

        has_node = self.changelog.index.has_node
        parentgone = any(not has_node(p) for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, b'undo.narrowspec')
            narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, b'undo.dirstate')
            try:
                branch = self.vfs.read(b'undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(
                    _(
                        b'named branch could not be reset: '
                        b'current branch is still \'%s\'\n'
                    )
                    % self.dirstate.branch()
                )

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
            mergestatemod.mergestate.clean(self, self[b'.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)

        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)

        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data, even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes[b'origrepolen'] < len(self):
            # accessing the 'served' branchmap should refresh all the others,
            self.ui.debug(b'updating the branch cache\n')
            self.filtered(b'served').branchmap()
            self.filtered(b'served.hidden').branchmap()

        if full:
            unfi = self.unfiltered()

            self.changelog.update_caches(transaction=tr)
            self.manifestlog.update_caches(transaction=tr)

            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

            # accessing fnode cache warms the cache
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
            # accessing tags warms the cache
            self.tags()
            self.filtered(b'served').tags()

            # The `full` arg is documented as updating even the lazily-loaded
            # caches immediately, so we're forcing a write to cause these caches
            # to be warmed up even if they haven't explicitly been requested
            # yet (if they've never been used by hg, they won't ever have been
            # written, even if they're a subset of another kind of cache that
            # *has* been used).
            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                filtered.branchmap().write(filtered)

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        self._quick_access_changeid_invalidate()

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == b'dirstate':
                continue
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog._delayed
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(
        self,
        vfs,
        lockname,
        wait,
        releasefn,
        acquirefn,
        desc,
        inheritchecker=None,
        parentenvvar=None,
    ):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
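        # a timeout of 0 means "do not block": the lock attempt below fails
        # immediately when another process holds the lock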
        if wait:
            timeout = self.ui.configint(b"ui", b"timeout")
            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')

        l = lockmod.trylock(
            self.ui,
            vfs,
            lockname,
            timeout,
            warntimeout,
            releasefn=releasefn,
            acquirefn=acquirefn,
            desc=desc,
            inheritchecker=inheritchecker,
            parentlock=parentlock,
            signalsafe=signalsafe,
        )
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else:  # no lock has been found.
            callback(True)

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                b'wlock cannot be inherited in the middle of a transaction'
            )

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisitions would not cause a dead-lock; they would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn(b'"wlock" acquired after "lock"')

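        # on wlock release, write the dirstate back unless a parent change is
        # still pending, in which case the in-memory state is discarded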
        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache[b'dirstate'].refresh()

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
            inheritchecker=self._wlockchecktransaction,
            parentenvvar=b'HG_WLOCK_LOCKER',
        )
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def checkcommitpatterns(self, wctx, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == b'.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _(b'file not found!'))
                # Is it a directory that exists or used to exist?
                if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                    d = f + b'/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _(b"no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _(b"file not tracked!"))

    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=None,
        extra=None,
    ):
        """Add a new revision to the current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            ms = mergestatemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            # internal config: ui.allowemptycommit
            if cctx.isempty() and not self.ui.configbool(
                b'ui', b'allowemptycommit'
            ):
                self.ui.debug(b'nothing to commit, clearing merge state\n')
                ms.reset()
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
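            # hooks receive the parents as hex strings; an empty string stands
            # in for a null second parent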
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msgfn
                    )
                    self.ui.write(
                        _(
                            b"note: use 'hg commit --logfile "
                            b".hg/last-message.txt --edit' to reuse it\n"
                        )
                    )
                raise

        def commithook(unused_success):
            # hack for commands that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook(
                    b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                )

        self._afterlock(commithook)
        return ret

    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        return commit.commitctx(self, ctx, error=error, origctx=origctx)

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(
        self,
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(
            node2, match, ignored, clean, unknown, listsubrepos
        )

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
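            # cl.headrevs() yields revisions in ascending order; reverse so
            # the newest head comes first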
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
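            # follow first parents from n until a merge or the root is
            # reached; record the walked span as (starting node, last node,
            # its p1, its p2)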
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

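    # 'between' backs the legacy wire-protocol command of the same name: for
    # each (top, bottom) pair it samples nodes along the first-parent chain
    # at exponentially growing distances (1, 2, 4, ...)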
    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of hooks called before pushing
        changesets, each receiving a pushop with repo, remote and outgoing
        attributes.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook(unused_success):
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def savecommitmessage(self, text):
        fp = self.vfs(b'last-message.txt', b'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
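        # return the path relative to the current directory, so the hint
        # printed when a commit fails points at a directly usable path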
        return self.pathto(fp.name[len(self.root) + 1 :])


# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]

    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError:  # journal file does not yet exist
                pass

    return a


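# maps a journal file name to its undo counterpart, e.g.
# b'journal.dirstate' -> b'undo.dirstate'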
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))


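# module-level entry point used by higher layers to open (and optionally
# create) a repository at a local path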
def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)


def islocal(path):
    return True


def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts


def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if b'sharedrepo' in createopts:
        requirements = set(createopts[b'sharedrepo'].requirements)
        if createopts.get(b'sharedrelative'):
            requirements.add(b'relshared')
        else:
            requirements.add(b'shared')

        return requirements

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {b'revlogv1'}
    if ui.configbool(b'format', b'usestore'):
        requirements.add(b'store')
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(b'fncache')
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(b'dotencode')

    compengines = ui.configlist(b'format', b'revlog-compression')
3282 for compengine in compengines:
3282 for compengine in compengines:
3283 if compengine in util.compengines:
3283 if compengine in util.compengines:
3284 break
3284 break
3285 else:
3285 else:
3286 raise error.Abort(
3286 raise error.Abort(
3287 _(
3287 _(
3288 b'compression engines %s defined by '
3288 b'compression engines %s defined by '
3289 b'format.revlog-compression not available'
3289 b'format.revlog-compression not available'
3290 )
3290 )
3291 % b', '.join(b'"%s"' % e for e in compengines),
3291 % b', '.join(b'"%s"' % e for e in compengines),
3292 hint=_(
3292 hint=_(
3293 b'run "hg debuginstall" to list available '
3293 b'run "hg debuginstall" to list available '
3294 b'compression engines'
3294 b'compression engines'
3295 ),
3295 ),
3296 )
3296 )
3297
3297
3298 # zlib is the historical default and doesn't need an explicit requirement.
3298 # zlib is the historical default and doesn't need an explicit requirement.
3299 if compengine == b'zstd':
3299 if compengine == b'zstd':
3300 requirements.add(b'revlog-compression-zstd')
3300 requirements.add(b'revlog-compression-zstd')
3301 elif compengine != b'zlib':
3301 elif compengine != b'zlib':
3302 requirements.add(b'exp-compression-%s' % compengine)
3302 requirements.add(b'exp-compression-%s' % compengine)
3303
3303
3304 if scmutil.gdinitconfig(ui):
3304 if scmutil.gdinitconfig(ui):
3305 requirements.add(b'generaldelta')
3305 requirements.add(b'generaldelta')
3306 if ui.configbool(b'format', b'sparse-revlog'):
3306 if ui.configbool(b'format', b'sparse-revlog'):
3307 requirements.add(SPARSEREVLOG_REQUIREMENT)
3307 requirements.add(SPARSEREVLOG_REQUIREMENT)
3308
3308
3309 # experimental config: format.exp-use-side-data
3309 # experimental config: format.exp-use-side-data
3310 if ui.configbool(b'format', b'exp-use-side-data'):
3310 if ui.configbool(b'format', b'exp-use-side-data'):
3311 requirements.add(SIDEDATA_REQUIREMENT)
3311 requirements.add(SIDEDATA_REQUIREMENT)
3312 # experimental config: format.exp-use-copies-side-data-changeset
3312 # experimental config: format.exp-use-copies-side-data-changeset
3313 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3313 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3314 requirements.add(SIDEDATA_REQUIREMENT)
3314 requirements.add(SIDEDATA_REQUIREMENT)
3315 requirements.add(COPIESSDC_REQUIREMENT)
3315 requirements.add(COPIESSDC_REQUIREMENT)
3316 if ui.configbool(b'experimental', b'treemanifest'):
3316 if ui.configbool(b'experimental', b'treemanifest'):
3317 requirements.add(b'treemanifest')
3317 requirements.add(b'treemanifest')
3318
3318
3319 revlogv2 = ui.config(b'experimental', b'revlogv2')
3319 revlogv2 = ui.config(b'experimental', b'revlogv2')
3320 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3320 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3321 requirements.remove(b'revlogv1')
3321 requirements.remove(b'revlogv1')
3322 # generaldelta is implied by revlogv2.
3322 # generaldelta is implied by revlogv2.
3323 requirements.discard(b'generaldelta')
3323 requirements.discard(b'generaldelta')
3324 requirements.add(REVLOGV2_REQUIREMENT)
3324 requirements.add(REVLOGV2_REQUIREMENT)
3325 # experimental config: format.internal-phase
3325 # experimental config: format.internal-phase
3326 if ui.configbool(b'format', b'internal-phase'):
3326 if ui.configbool(b'format', b'internal-phase'):
3327 requirements.add(b'internal-phase')
3327 requirements.add(b'internal-phase')
3328
3328
3329 if createopts.get(b'narrowfiles'):
3329 if createopts.get(b'narrowfiles'):
3330 requirements.add(repository.NARROW_REQUIREMENT)
3330 requirements.add(repository.NARROW_REQUIREMENT)
3331
3331
3332 if createopts.get(b'lfs'):
3332 if createopts.get(b'lfs'):
3333 requirements.add(b'lfs')
3333 requirements.add(b'lfs')
3334
3334
3335 if ui.configbool(b'format', b'bookmarks-in-store'):
3335 if ui.configbool(b'format', b'bookmarks-in-store'):
3336 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3336 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3337
3337
3338 if ui.configbool(b'format', b'use-persistent-nodemap'):
3338 if ui.configbool(b'format', b'use-persistent-nodemap'):
3339 requirements.add(NODEMAP_REQUIREMENT)
3339 requirements.add(NODEMAP_REQUIREMENT)
3340
3340
3341 return requirements
3341 return requirements
3342
3342
3343
3343
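For orientation, a hedged sketch of what this returns for a stock configuration; the exact set depends on the active format.* settings and the Mercurial version, so treat the listed requirements as typical rather than definitive:

    reqs = newreporequirements(ui, defaultcreateopts(ui))
    # with default settings, typically something like:
    #   {b'revlogv1', b'store', b'fncache', b'dotencode',
    #    b'generaldelta', b'sparserevlog'}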
3344 def checkrequirementscompat(ui, requirements):
3345     """ Checks compatibility of repository requirements enabled and disabled.
3346
3347     Returns a set of requirements which need to be dropped because dependent
3348     requirements are not enabled. Also warns users about it """
3349
3350     dropped = set()
3351
3352     if b'store' not in requirements:
3353         if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3354             ui.warn(
3355                 _(
3356                     b'ignoring enabled \'format.bookmarks-in-store\' config '
3357                     b'because it is incompatible with disabled '
3358                     b'\'format.usestore\' config\n'
3359                 )
3360             )
3361             dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3362
3363         if b'shared' in requirements or b'relshared' in requirements:
3364             raise error.Abort(
3365                 _(
3366                     b"cannot create shared repository as source was created"
3367                     b" with 'format.usestore' config disabled"
3368                 )
3369             )
3370
3371     return dropped
3372
3373
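A small sketch of the dropping behaviour, mirroring how createrepository() below subtracts the return value (the starting set is hypothetical):

    reqs = {b'revlogv1', bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT}  # no b'store'
    reqs -= checkrequirementscompat(ui, reqs)  # emits the warning on ui
    # reqs is now {b'revlogv1'}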
3374 def filterknowncreateopts(ui, createopts):
3375     """Filters a dict of repo creation options against options that are known.
3376
3377     Receives a dict of repo creation options and returns a dict of those
3378     options that we don't know how to handle.
3379
3380     This function is called as part of repository creation. If the
3381     returned dict contains any items, repository creation will not
3382     be allowed, as it means there was a request to create a repository
3383     with options not recognized by loaded code.
3384
3385     Extensions can wrap this function to filter out creation options
3386     they know how to handle.
3387     """
3388     known = {
3389         b'backend',
3390         b'lfs',
3391         b'narrowfiles',
3392         b'sharedrepo',
3393         b'sharedrelative',
3394         b'shareditems',
3395         b'shallowfilestore',
3396     }
3397
3398     return {k: v for k, v in createopts.items() if k not in known}
3399
3400
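As the docstring notes, extensions wrap this function to claim creation options they handle. A sketch of such a wrapper; the b'myopt' option is hypothetical:

    from mercurial import extensions, localrepo

    def _filterknown(orig, ui, createopts):
        unknown = orig(ui, createopts)
        unknown.pop(b'myopt', None)  # this extension handles b'myopt'
        return unknown

    def extsetup(ui):
        extensions.wrapfunction(localrepo, 'filterknowncreateopts', _filterknown)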
3401 def createrepository(ui, path, createopts=None):
3402     """Create a new repository in a vfs.
3403
3404     ``path`` path to the new repo's working directory.
3405     ``createopts`` options for the new repository.
3406
3407     The following keys for ``createopts`` are recognized:
3408
3409     backend
3410         The storage backend to use.
3411     lfs
3412         Repository will be created with ``lfs`` requirement. The lfs extension
3413         will automatically be loaded when the repository is accessed.
3414     narrowfiles
3415         Set up repository to support narrow file storage.
3416     sharedrepo
3417         Repository object from which storage should be shared.
3418     sharedrelative
3419         Boolean indicating if the path to the shared repo should be
3420         stored as relative. By default, the pointer to the "parent" repo
3421         is stored as an absolute path.
3422     shareditems
3423         Set of items to share to the new repository (in addition to storage).
3424     shallowfilestore
3425         Indicates that storage for files should be shallow (not all ancestor
3426         revisions are known).
3427     """
3428     createopts = defaultcreateopts(ui, createopts=createopts)
3429
3430     unknownopts = filterknowncreateopts(ui, createopts)
3431
3432     if not isinstance(unknownopts, dict):
3433         raise error.ProgrammingError(
3434             b'filterknowncreateopts() did not return a dict'
3435         )
3436
3437     if unknownopts:
3438         raise error.Abort(
3439             _(
3440                 b'unable to create repository because of unknown '
3441                 b'creation option: %s'
3442             )
3443             % b', '.join(sorted(unknownopts)),
3444             hint=_(b'is a required extension not loaded?'),
3445         )
3446
3447     requirements = newreporequirements(ui, createopts=createopts)
3448     requirements -= checkrequirementscompat(ui, requirements)
3449
3450     wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3451
3452     hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3453     if hgvfs.exists():
3454         raise error.RepoError(_(b'repository %s already exists') % path)
3455
3456     if b'sharedrepo' in createopts:
3457         sharedpath = createopts[b'sharedrepo'].sharedpath
3458
3459         if createopts.get(b'sharedrelative'):
3460             try:
3461                 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3462             except (IOError, ValueError) as e:
3463                 # ValueError is raised on Windows if the drive letters differ
3464                 # on each path.
3465                 raise error.Abort(
3466                     _(b'cannot calculate relative path'),
3467                     hint=stringutil.forcebytestr(e),
3468                 )
3469
3470     if not wdirvfs.exists():
3471         wdirvfs.makedirs()
3472
3473     hgvfs.makedir(notindexed=True)
3474     if b'sharedrepo' not in createopts:
3475         hgvfs.mkdir(b'cache')
3476     hgvfs.mkdir(b'wcache')
3477
3478     if b'store' in requirements and b'sharedrepo' not in createopts:
3479         hgvfs.mkdir(b'store')
3480
3481         # We create an invalid changelog outside the store so very old
3482         # Mercurial versions (which didn't know about the requirements
3483         # file) encounter an error on reading the changelog. This
3484         # effectively locks out old clients and prevents them from
3485         # mucking with a repo in an unknown format.
3486         #
3487         # The revlog header has version 2, which won't be recognized by
3488         # such old clients.
3489         hgvfs.append(
3490             b'00changelog.i',
3491             b'\0\0\0\2 dummy changelog to prevent using the old repo '
3492             b'layout',
3493         )
3494
3495     scmutil.writerequires(hgvfs, requirements)
3496
3497     # Write out file telling readers where to find the shared store.
3498     if b'sharedrepo' in createopts:
3499         hgvfs.write(b'sharedpath', sharedpath)
3500
3501     if createopts.get(b'shareditems'):
3502         shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3503         hgvfs.write(b'shared', shared)
3504
3505
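Typical usage pairs this with makelocalrepository(), exactly as instance() does at the top of this hunk; a minimal sketch with an illustrative path:

    createrepository(ui, b'/tmp/newrepo', createopts={b'lfs': True})
    repo = makelocalrepository(ui, b'/tmp/newrepo')
    assert b'lfs' in repo.requirements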
3506 def poisonrepository(repo):
3507     """Poison a repository instance so it can no longer be used."""
3508     # Perform any cleanup on the instance.
3509     repo.close()
3510
3511     # Our strategy is to replace the type of the object with one that
3512     # has all attribute lookups result in error.
3513     #
3514     # But we have to allow the close() method because some constructors
3515     # of repos call close() on repo references.
3516     class poisonedrepository(object):
3517         def __getattribute__(self, item):
3518             if item == 'close':
3519                 return object.__getattribute__(self, item)
3520
3521             raise error.ProgrammingError(
3522                 b'repo instances should not be used after unshare'
3523             )
3524
3525         def close(self):
3526             pass
3527
3528     # We may have a repoview, which intercepts __setattr__. So be sure
3529     # we operate at the lowest level possible.
3530     object.__setattr__(repo, '__class__', poisonedrepository)
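The poisoning contract, sketched: after the call, close() stays callable and everything else raises:

    poisonrepository(repo)
    repo.close()  # still permitted, and a no-op
    try:
        repo.status()
    except error.ProgrammingError:
        pass  # expected: instances must not be used after unshare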
@@ -1,831 +1,833 b''
1 # sparse.py - functionality for sparse checkouts
2 #
3 # Copyright 2014 Facebook, Inc.
4 #
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
7
8 from __future__ import absolute_import
9
10 import os
11
12 from .i18n import _
13 from .node import (
14     hex,
15     nullid,
16 )
17 from . import (
18     error,
19     match as matchmod,
20     merge as mergemod,
21     mergestate as mergestatemod,
22     pathutil,
23     pycompat,
24     scmutil,
25     util,
26 )
27 from .interfaces import repository
28 from .utils import hashutil
29
30
31 # Whether sparse features are enabled. This variable is intended to be
32 # temporary to facilitate porting sparse to core. It should eventually be
33 # a per-repo option, possibly a repo requirement.
34 enabled = False
35
36
37 def parseconfig(ui, raw, action):
38     """Parse sparse config file content.
39
40     action is the command which is triggering this read; can be narrow or sparse.
41
42     Returns a tuple of includes, excludes, and profiles.
43     """
44     includes = set()
45     excludes = set()
46     profiles = set()
47     current = None
48     havesection = False
49
50     for line in raw.split(b'\n'):
51         line = line.strip()
52         if not line or line.startswith(b'#'):
53             # empty or comment line, skip
54             continue
55         elif line.startswith(b'%include '):
56             line = line[9:].strip()
57             if line:
58                 profiles.add(line)
59         elif line == b'[include]':
60             if havesection and current != includes:
61                 # TODO pass filename into this API so we can report it.
62                 raise error.Abort(
63                     _(
64                         b'%(action)s config cannot have includes '
65                         b'after excludes'
66                     )
67                     % {b'action': action}
68                 )
69             havesection = True
70             current = includes
71             continue
72         elif line == b'[exclude]':
73             havesection = True
74             current = excludes
75         elif line:
76             if current is None:
77                 raise error.Abort(
78                     _(
79                         b'%(action)s config entry outside of '
80                         b'section: %(line)s'
81                     )
82                     % {b'action': action, b'line': line},
83                     hint=_(
84                         b'add an [include] or [exclude] line '
85                         b'to declare the entry type'
86                     ),
87                 )
88
89             if line.strip().startswith(b'/'):
90                 ui.warn(
91                     _(
92                         b'warning: %(action)s profile cannot use'
93                         b' paths starting with /, ignoring %(line)s\n'
94                     )
95                     % {b'action': action, b'line': line}
96                 )
97                 continue
98             current.add(line)
99
100     return includes, excludes, profiles
101
102
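A worked example of the format this parser accepts (file contents are illustrative):

    raw = (b'%include base.sparse\n'
           b'[include]\n'
           b'path/to/dir\n'
           b'[exclude]\n'
           b'path/to/dir/tests\n')
    includes, excludes, profiles = parseconfig(ui, raw, b'sparse')
    # includes == {b'path/to/dir'}
    # excludes == {b'path/to/dir/tests'}
    # profiles == {b'base.sparse'}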
103 # Exists as separate function to facilitate monkeypatching.
104 def readprofile(repo, profile, changeid):
105     """Resolve the raw content of a sparse profile file."""
106     # TODO add some kind of cache here because this incurs a manifest
107     # resolve and can be slow.
108     return repo.filectx(profile, changeid=changeid).data()
109
110
111 def patternsforrev(repo, rev):
112     """Obtain sparse checkout patterns for the given rev.
113
114     Returns a tuple of iterables representing includes, excludes, and
115     profiles.
116     """
117     # Feature isn't enabled. No-op.
118     if not enabled:
119         return set(), set(), set()
120
121     raw = repo.vfs.tryread(b'sparse')
122     if not raw:
123         return set(), set(), set()
124
125     if rev is None:
126         raise error.Abort(
127             _(b'cannot parse sparse patterns from working directory')
128         )
129
130     includes, excludes, profiles = parseconfig(repo.ui, raw, b'sparse')
131     ctx = repo[rev]
132
133     if profiles:
134         visited = set()
135         while profiles:
136             profile = profiles.pop()
137             if profile in visited:
138                 continue
139
140             visited.add(profile)
141
142             try:
143                 raw = readprofile(repo, profile, rev)
144             except error.ManifestLookupError:
145                 msg = (
146                     b"warning: sparse profile '%s' not found "
147                     b"in rev %s - ignoring it\n" % (profile, ctx)
148                 )
149                 # experimental config: sparse.missingwarning
150                 if repo.ui.configbool(b'sparse', b'missingwarning'):
151                     repo.ui.warn(msg)
152                 else:
153                     repo.ui.debug(msg)
154                 continue
155
156             pincludes, pexcludes, subprofs = parseconfig(
157                 repo.ui, raw, b'sparse'
158             )
159             includes.update(pincludes)
160             excludes.update(pexcludes)
161             profiles.update(subprofs)
162
163         profiles = visited
164
165     if includes:
166         includes.add(b'.hg*')
167
168     return includes, excludes, profiles
169
170
171 def activeconfig(repo):
172     """Determine the active sparse config rules.
173
174     Rules are constructed by reading the current sparse config and bringing in
175     referenced profiles from parents of the working directory.
176     """
177     revs = [
178         repo.changelog.rev(node)
179         for node in repo.dirstate.parents()
180         if node != nullid
181     ]
182
183     allincludes = set()
184     allexcludes = set()
185     allprofiles = set()
186
187     for rev in revs:
188         includes, excludes, profiles = patternsforrev(repo, rev)
189         allincludes |= includes
190         allexcludes |= excludes
191         allprofiles |= profiles
192
193     return allincludes, allexcludes, allprofiles
194
195
196 def configsignature(repo, includetemp=True):
197     """Obtain the signature string for the current sparse configuration.
198
199     This is used to construct a cache key for matchers.
200     """
201     cache = repo._sparsesignaturecache
202
203     signature = cache.get(b'signature')
204
205     if includetemp:
206         tempsignature = cache.get(b'tempsignature')
207     else:
208         tempsignature = b'0'
209
210     if signature is None or (includetemp and tempsignature is None):
211         signature = hex(hashutil.sha1(repo.vfs.tryread(b'sparse')).digest())
212         cache[b'signature'] = signature
213
214         if includetemp:
215             raw = repo.vfs.tryread(b'tempsparse')
216             tempsignature = hex(hashutil.sha1(raw).digest())
217             cache[b'tempsignature'] = tempsignature
218
219     return b'%s %s' % (signature, tempsignature)
220
221
222 def writeconfig(repo, includes, excludes, profiles):
223     """Write the sparse config file given a sparse configuration."""
224     with repo.vfs(b'sparse', b'wb') as fh:
225         for p in sorted(profiles):
226             fh.write(b'%%include %s\n' % p)
227
228         if includes:
229             fh.write(b'[include]\n')
230             for i in sorted(includes):
231                 fh.write(i)
232                 fh.write(b'\n')
233
234         if excludes:
235             fh.write(b'[exclude]\n')
236             for e in sorted(excludes):
237                 fh.write(e)
238                 fh.write(b'\n')
239
240     repo._sparsesignaturecache.clear()
241
242
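writeconfig() is the inverse of parseconfig() above. With illustrative rules, the resulting .hg/sparse file looks like:

    writeconfig(repo, {b'lib'}, {b'lib/tests'}, {b'base.sparse'})
    # .hg/sparse now contains:
    #   %include base.sparse
    #   [include]
    #   lib
    #   [exclude]
    #   lib/tests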
243 def readtemporaryincludes(repo):
244     raw = repo.vfs.tryread(b'tempsparse')
245     if not raw:
246         return set()
247
248     return set(raw.split(b'\n'))
249
250
251 def writetemporaryincludes(repo, includes):
252     repo.vfs.write(b'tempsparse', b'\n'.join(sorted(includes)))
253     repo._sparsesignaturecache.clear()
254
255
256 def addtemporaryincludes(repo, additional):
257     includes = readtemporaryincludes(repo)
258     for i in additional:
259         includes.add(i)
260     writetemporaryincludes(repo, includes)
261
262
263 def prunetemporaryincludes(repo):
264     if not enabled or not repo.vfs.exists(b'tempsparse'):
265         return
266
267     s = repo.status()
268     if s.modified or s.added or s.removed or s.deleted:
269         # Still have pending changes. Don't bother trying to prune.
270         return
271
272     sparsematch = matcher(repo, includetemp=False)
273     dirstate = repo.dirstate
274     mresult = mergemod.mergeresult()
275     dropped = []
276     tempincludes = readtemporaryincludes(repo)
277     for file in tempincludes:
278         if file in dirstate and not sparsematch(file):
279             message = _(b'dropping temporarily included sparse files')
280             mresult.addfile(file, mergestatemod.ACTION_REMOVE, None, message)
281             dropped.append(file)
282
283     mergemod.applyupdates(
284         repo, mresult, repo[None], repo[b'.'], False, wantfiledata=False
285     )
286
287     # Fix dirstate
288     for file in dropped:
289         dirstate.drop(file)
290
291     repo.vfs.unlink(b'tempsparse')
292     repo._sparsesignaturecache.clear()
293     msg = _(
294         b'cleaned up %d temporarily added file(s) from the '
295         b'sparse checkout\n'
296     )
297     repo.ui.status(msg % len(tempincludes))
298
299
300 def forceincludematcher(matcher, includes):
301     """Returns a matcher that returns true for any of the forced includes
302     before testing against the actual matcher."""
303     kindpats = [(b'path', include, b'') for include in includes]
304     includematcher = matchmod.includematcher(b'', kindpats)
305     return matchmod.unionmatcher([includematcher, matcher])
306
307
308 def matcher(repo, revs=None, includetemp=True):
309     """Obtain a matcher for sparse working directories for the given revs.
310
311     If multiple revisions are specified, the matcher is the union of all
312     revs.
313
314     ``includetemp`` indicates whether to use the temporary sparse profile.
315     """
316     # If sparse isn't enabled, sparse matcher matches everything.
317     if not enabled:
318         return matchmod.always()
319
320     if not revs or revs == [None]:
321         revs = [
322             repo.changelog.rev(node)
323             for node in repo.dirstate.parents()
324             if node != nullid
325         ]
326
327     signature = configsignature(repo, includetemp=includetemp)
328
329     key = b'%s %s' % (signature, b' '.join(map(pycompat.bytestr, revs)))
330
331     result = repo._sparsematchercache.get(key)
332     if result:
333         return result
334
335     matchers = []
336     for rev in revs:
337         try:
338             includes, excludes, profiles = patternsforrev(repo, rev)
339
340             if includes or excludes:
341                 matcher = matchmod.match(
342                     repo.root,
343                     b'',
344                     [],
345                     include=includes,
346                     exclude=excludes,
347                     default=b'relpath',
348                 )
349                 matchers.append(matcher)
350         except IOError:
351             pass
352
353     if not matchers:
354         result = matchmod.always()
355     elif len(matchers) == 1:
356         result = matchers[0]
357     else:
358         result = matchmod.unionmatcher(matchers)
359
360     if includetemp:
361         tempincludes = readtemporaryincludes(repo)
362         result = forceincludematcher(result, tempincludes)
363
364     repo._sparsematchercache[key] = result
365
366     return result
367
368
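A sketch of how callers consume the returned matcher; paths are repo-relative and illustrative:

    m = matcher(repo)  # union of the sparse config of the dirstate parents
    if m(b'path/inside/checkout.py'):
        pass  # file belongs in the sparse working directory
    # with the feature off (enabled == False) this is matchmod.always()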
369 def filterupdatesactions(repo, wctx, mctx, branchmerge, mresult):
370     """Filter updates to only lay out files that match the sparse rules."""
371     if not enabled:
372         return
373
374     oldrevs = [pctx.rev() for pctx in wctx.parents()]
375     oldsparsematch = matcher(repo, oldrevs)
376
377     if oldsparsematch.always():
378         return
379
380     files = set()
381     prunedactions = {}
382
383     if branchmerge:
384         # If we're merging, use the wctx filter, since we're merging into
385         # the wctx.
386         sparsematch = matcher(repo, [wctx.p1().rev()])
387     else:
388         # If we're updating, use the target context's filter, since we're
389         # moving to the target context.
390         sparsematch = matcher(repo, [mctx.rev()])
391
392     temporaryfiles = []
393     for file, action in mresult.filemap():
394         type, args, msg = action
395         files.add(file)
396         if sparsematch(file):
397             prunedactions[file] = action
398         elif type == mergestatemod.ACTION_MERGE:
399             temporaryfiles.append(file)
400             prunedactions[file] = action
401         elif branchmerge:
402             if type != mergestatemod.ACTION_KEEP:
403                 temporaryfiles.append(file)
404                 prunedactions[file] = action
405         elif type == mergestatemod.ACTION_FORGET:
406             prunedactions[file] = action
407         elif file in wctx:
408             prunedactions[file] = (mergestatemod.ACTION_REMOVE, args, msg)
409
410         # in case of rename on one side, it is possible that f1 might not
411         # be present in the sparse checkout; we should include it
412         # TODO: should we do the same for f2?
413         # exists as a separate check because file can be in sparse and hence
414         # if we try to club this condition in above `elif type == ACTION_MERGE`
415         # it won't be triggered
416         if branchmerge and type == mergestatemod.ACTION_MERGE:
417             f1, f2, fa, move, anc = args
418             if not sparsematch(f1):
419                 temporaryfiles.append(f1)
420
421     if len(temporaryfiles) > 0:
422         repo.ui.status(
423             _(
424                 b'temporarily included %d file(s) in the sparse '
425                 b'checkout for merging\n'
426             )
427             % len(temporaryfiles)
428         )
429         addtemporaryincludes(repo, temporaryfiles)
430
431         # Add the new files to the working copy so they can be merged, etc
432         tmresult = mergemod.mergeresult()
433         message = b'temporarily adding to sparse checkout'
434         wctxmanifest = repo[None].manifest()
435         for file in temporaryfiles:
436             if file in wctxmanifest:
437                 fctx = repo[None][file]
438                 tmresult.addfile(
439                     file,
440                     mergestatemod.ACTION_GET,
441                     (fctx.flags(), False),
442                     message,
443                 )
444
445         mergemod.applyupdates(
446             repo, tmresult, repo[None], repo[b'.'], False, wantfiledata=False
447         )
448
449         dirstate = repo.dirstate
450         for file, flags, msg in tmresult.getactions([mergestatemod.ACTION_GET]):
451             dirstate.normal(file)
452
453     profiles = activeconfig(repo)[2]
454     changedprofiles = profiles & files
455     # If an active profile changed during the update, refresh the checkout.
456     # Don't do this during a branch merge, since all incoming changes should
457     # have been handled by the temporary includes above.
458     if changedprofiles and not branchmerge:
459         mf = mctx.manifest()
460         for file in mf:
461             old = oldsparsematch(file)
462             new = sparsematch(file)
463             if not old and new:
464                 flags = mf.flags(file)
465                 prunedactions[file] = (
466                     mergestatemod.ACTION_GET,
467                     (flags, False),
468                     b'',
469                 )
470             elif old and not new:
471                 prunedactions[file] = (mergestatemod.ACTION_REMOVE, [], b'')
472
473     mresult.setactions(prunedactions)
474
475
476 def refreshwdir(repo, origstatus, origsparsematch, force=False):
477     """Refreshes working directory by taking sparse config into account.
478
479     The old status and sparse matcher are compared against the current sparse
480     matcher.
481
482     Will abort if a file with pending changes is being excluded or included
483     unless ``force`` is True.
484     """
485     # Verify there are no pending changes
486     pending = set()
487     pending.update(origstatus.modified)
488     pending.update(origstatus.added)
489     pending.update(origstatus.removed)
490     sparsematch = matcher(repo)
491     abort = False
492
493     for f in pending:
494         if not sparsematch(f):
495             repo.ui.warn(_(b"pending changes to '%s'\n") % f)
496             abort = not force
497
498     if abort:
499         raise error.Abort(
500             _(b'could not update sparseness due to pending changes')
501         )
502
503     # Calculate merge result
504     dirstate = repo.dirstate
505     ctx = repo[b'.']
506     added = []
507     lookup = []
508     dropped = []
509     mf = ctx.manifest()
510     files = set(mf)
511     mresult = mergemod.mergeresult()
512
513     for file in files:
514         old = origsparsematch(file)
515         new = sparsematch(file)
516         # Add files that are newly included, or that don't exist in
517         # the dirstate yet.
518         if (new and not old) or (old and new and not file in dirstate):
519             fl = mf.flags(file)
520             if repo.wvfs.exists(file):
521                 mresult.addfile(file, mergestatemod.ACTION_EXEC, (fl,), b'')
522                 lookup.append(file)
523             else:
524                 mresult.addfile(
525                     file, mergestatemod.ACTION_GET, (fl, False), b''
526                 )
527                 added.append(file)
528         # Drop files that are newly excluded, or that still exist in
529         # the dirstate.
530         elif (old and not new) or (not old and not new and file in dirstate):
531             dropped.append(file)
532             if file not in pending:
533                 mresult.addfile(file, mergestatemod.ACTION_REMOVE, [], b'')
534
535     # Verify there are no pending changes in newly included files
536     abort = False
537     for file in lookup:
538         repo.ui.warn(_(b"pending changes to '%s'\n") % file)
539         abort = not force
540     if abort:
541         raise error.Abort(
542             _(
543                 b'cannot change sparseness due to pending '
544                 b'changes (delete the files or use '
545                 b'--force to bring them back dirty)'
546             )
547         )
548
549     # Check for files that were only in the dirstate.
550     for file, state in pycompat.iteritems(dirstate):
551         if not file in files:
552             old = origsparsematch(file)
553             new = sparsematch(file)
554             if old and not new:
555                 dropped.append(file)
556
557     mergemod.applyupdates(
558         repo, mresult, repo[None], repo[b'.'], False, wantfiledata=False
559     )
560
561     # Fix dirstate
562     for file in added:
563         dirstate.normal(file)
564
565     for file in dropped:
566         dirstate.drop(file)
567
568     for file in lookup:
569         # File exists on disk, and we're bringing it back in an unknown state.
570         dirstate.normallookup(file)
571
572     return added, dropped, lookup
573
574
575 def aftercommit(repo, node):
576     """Perform actions after a working directory commit."""
577     # This function is called unconditionally, even if sparse isn't
578     # enabled.
579     ctx = repo[node]
580
581     profiles = patternsforrev(repo, ctx.rev())[2]
582
583     # profiles will only have data if sparse is enabled.
584     if profiles & set(ctx.files()):
585         origstatus = repo.status()
586         origsparsematch = matcher(repo)
587         refreshwdir(repo, origstatus, origsparsematch, force=True)
588
589     prunetemporaryincludes(repo)
590
591
592 def _updateconfigandrefreshwdir(
593     repo, includes, excludes, profiles, force=False, removing=False
594 ):
595     """Update the sparse config and working directory state."""
596     raw = repo.vfs.tryread(b'sparse')
597     oldincludes, oldexcludes, oldprofiles = parseconfig(repo.ui, raw, b'sparse')
598
599     oldstatus = repo.status()
600     oldmatch = matcher(repo)
601     oldrequires = set(repo.requirements)
602
603     # TODO remove this try..except once the matcher integrates better
604     # with dirstate. We currently have to write the updated config
605     # because that will invalidate the matcher cache and force a
606     # re-read. We ideally want to update the cached matcher on the
607     # repo instance then flush the new config to disk once wdir is
608     # updated. But this requires massive rework to matcher() and its
609     # consumers.
610
609     if b'exp-sparse' in oldrequires and removing:
611     if repository.SPARSE_REQUIREMENT in oldrequires and removing:
610         repo.requirements.discard(b'exp-sparse')
612         repo.requirements.discard(repository.SPARSE_REQUIREMENT)
613         scmutil.writereporequirements(repo)
612     elif b'exp-sparse' not in oldrequires:
614     elif repository.SPARSE_REQUIREMENT not in oldrequires:
613         repo.requirements.add(b'exp-sparse')
615         repo.requirements.add(repository.SPARSE_REQUIREMENT)
616         scmutil.writereporequirements(repo)
617
618     try:
619         writeconfig(repo, includes, excludes, profiles)
620         return refreshwdir(repo, oldstatus, oldmatch, force=force)
621     except Exception:
622         if repo.requirements != oldrequires:
623             repo.requirements.clear()
624             repo.requirements |= oldrequires
625             scmutil.writereporequirements(repo)
626         writeconfig(repo, oldincludes, oldexcludes, oldprofiles)
627         raise
628
629
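The hunk above is the point of this change: the old lines using the literal b'exp-sparse' are replaced by new lines using the repository.SPARSE_REQUIREMENT constant imported above. Other code probing for a sparse working copy can now use the same constant, e.g. (sketch):

    if repository.SPARSE_REQUIREMENT in repo.requirements:
        pass  # repo uses a sparse working copy; sparse support is required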
628 def clearrules(repo, force=False):
630 def clearrules(repo, force=False):
629 """Clears include/exclude rules from the sparse config.
631 """Clears include/exclude rules from the sparse config.
630
632
631 The remaining sparse config only has profiles, if defined. The working
633 The remaining sparse config only has profiles, if defined. The working
632 directory is refreshed, as needed.
634 directory is refreshed, as needed.
633 """
635 """
634 with repo.wlock():
636 with repo.wlock():
635 raw = repo.vfs.tryread(b'sparse')
637 raw = repo.vfs.tryread(b'sparse')
636 includes, excludes, profiles = parseconfig(repo.ui, raw, b'sparse')
638 includes, excludes, profiles = parseconfig(repo.ui, raw, b'sparse')
637
639
638 if not includes and not excludes:
640 if not includes and not excludes:
639 return
641 return
640
642
641 _updateconfigandrefreshwdir(repo, set(), set(), profiles, force=force)
643 _updateconfigandrefreshwdir(repo, set(), set(), profiles, force=force)
642
644
643
645
def importfromfiles(repo, opts, paths, force=False):
    """Import sparse config rules from files.

    The updated sparse config is written out and the working directory
    is refreshed, as needed.
    """
    with repo.wlock():
        # read current configuration
        raw = repo.vfs.tryread(b'sparse')
        includes, excludes, profiles = parseconfig(repo.ui, raw, b'sparse')
        aincludes, aexcludes, aprofiles = activeconfig(repo)

        # Import rules on top; only take in rules that are not yet
        # part of the active rules.
        changed = False
        for p in paths:
            with util.posixfile(util.expandpath(p), mode=b'rb') as fh:
                raw = fh.read()

            iincludes, iexcludes, iprofiles = parseconfig(
                repo.ui, raw, b'sparse'
            )
            oldsize = len(includes) + len(excludes) + len(profiles)
            includes.update(iincludes - aincludes)
            excludes.update(iexcludes - aexcludes)
            profiles.update(iprofiles - aprofiles)
            if len(includes) + len(excludes) + len(profiles) > oldsize:
                changed = True

        profilecount = includecount = excludecount = 0
        fcounts = (0, 0, 0)

        if changed:
            profilecount = len(profiles - aprofiles)
            includecount = len(includes - aincludes)
            excludecount = len(excludes - aexcludes)

            fcounts = map(
                len,
                _updateconfigandrefreshwdir(
                    repo, includes, excludes, profiles, force=force
                ),
            )

        printchanges(
            repo.ui, opts, profilecount, includecount, excludecount, *fcounts
        )

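The set arithmetic above is the heart of the import: rules that are already *active* (on-disk rules plus anything pulled in via profiles) are filtered out, so re-importing the same file is a no-op. A standalone toy example of that merge, with invented values:

# Toy illustration of the merge in importfromfiles(); values are invented.
includes = {b'src/'}              # includes currently written to .hg/sparse
aincludes = {b'src/', b'lib/'}    # *active* includes (config + profiles)

iincludes = {b'lib/', b'tools/'}  # includes parsed from the imported file

oldsize = len(includes)
includes.update(iincludes - aincludes)  # b'lib/' is already active: skipped
changed = len(includes) > oldsize

print(sorted(includes), changed)
# [b'src/', b'tools/'] True
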
def updateconfig(
    repo,
    pats,
    opts,
    include=False,
    exclude=False,
    reset=False,
    delete=False,
    enableprofile=False,
    disableprofile=False,
    force=False,
    usereporootpaths=False,
):
    """Perform a sparse config update.

    Only one of the actions may be performed.

    The new config is written out and a working directory refresh is performed.
    """
    with repo.wlock():
        raw = repo.vfs.tryread(b'sparse')
        oldinclude, oldexclude, oldprofiles = parseconfig(
            repo.ui, raw, b'sparse'
        )

        if reset:
            newinclude = set()
            newexclude = set()
            newprofiles = set()
        else:
            newinclude = set(oldinclude)
            newexclude = set(oldexclude)
            newprofiles = set(oldprofiles)

        if any(os.path.isabs(pat) for pat in pats):
            raise error.Abort(_(b'paths cannot be absolute'))

        if not usereporootpaths:
            # let's treat paths as relative to cwd
            root, cwd = repo.root, repo.getcwd()
            abspats = []
            for kindpat in pats:
                kind, pat = matchmod._patsplit(kindpat, None)
                if kind in matchmod.cwdrelativepatternkinds or kind is None:
                    ap = (kind + b':' if kind else b'') + pathutil.canonpath(
                        root, cwd, pat
                    )
                    abspats.append(ap)
                else:
                    abspats.append(kindpat)
            pats = abspats

        if include:
            newinclude.update(pats)
        elif exclude:
            newexclude.update(pats)
        elif enableprofile:
            newprofiles.update(pats)
        elif disableprofile:
            newprofiles.difference_update(pats)
        elif delete:
            newinclude.difference_update(pats)
            newexclude.difference_update(pats)

        profilecount = len(newprofiles - oldprofiles) - len(
            oldprofiles - newprofiles
        )
        includecount = len(newinclude - oldinclude) - len(
            oldinclude - newinclude
        )
        excludecount = len(newexclude - oldexclude) - len(
            oldexclude - newexclude
        )

        fcounts = map(
            len,
            _updateconfigandrefreshwdir(
                repo,
                newinclude,
                newexclude,
                newprofiles,
                force=force,
                removing=reset,
            ),
        )

        printchanges(
            repo.ui, opts, profilecount, includecount, excludecount, *fcounts
        )

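Note how the counts above are computed as *net* changes, ``len(new - old) - len(old - new)``: additions and removals offset each other, so a count can even be negative. A tiny standalone example with invented rule names:

# Net-change arithmetic from updateconfig(); values are invented.
oldinclude = {b'src/', b'docs/'}
newinclude = {b'src/', b'tests/'}   # b'docs/' removed, b'tests/' added

includecount = len(newinclude - oldinclude) - len(oldinclude - newinclude)
print(includecount)  # 0 -- one rule added, one removed, net zero

In the sparse extension these code paths sit behind ``hg debugsparse`` options such as ``--include``, ``--exclude``, ``--enable-profile`` and ``--reset`` (stated here from the extension's documented interface, not from this patch).
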
def printchanges(
    ui,
    opts,
    profilecount=0,
    includecount=0,
    excludecount=0,
    added=0,
    dropped=0,
    conflicting=0,
):
    """Print output summarizing sparse config changes."""
    with ui.formatter(b'sparse', opts) as fm:
        fm.startitem()
        fm.condwrite(
            ui.verbose,
            b'profiles_added',
            _(b'Profiles changed: %d\n'),
            profilecount,
        )
        fm.condwrite(
            ui.verbose,
            b'include_rules_added',
            _(b'Include rules changed: %d\n'),
            includecount,
        )
        fm.condwrite(
            ui.verbose,
            b'exclude_rules_added',
            _(b'Exclude rules changed: %d\n'),
            excludecount,
        )

        # In 'plain' verbose mode, mergemod.applyupdates already outputs what
        # files are added or removed outside of the templating formatter
        # framework. No point in repeating ourselves in that case.
        if not fm.isplain():
            fm.condwrite(
                ui.verbose, b'files_added', _(b'Files added: %d\n'), added
            )
            fm.condwrite(
                ui.verbose, b'files_dropped', _(b'Files dropped: %d\n'), dropped
            )
            fm.condwrite(
                ui.verbose,
                b'files_conflicting',
                _(b'Files conflicting: %d\n'),
                conflicting,
            )
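
A note on the ``condwrite`` calls: with the default plain formatter, each summary line is emitted only when the condition (``ui.verbose``) holds. A minimal standalone stand-in for that gating, assuming nothing about Mercurial's formatter internals beyond the condition check:

# Plain-formatter flavour of condwrite(): print only when cond is true.
def condwrite(cond, fmt, value):
    if cond:
        print(fmt % value, end='')

verbose = True
condwrite(verbose, 'Profiles changed: %d\n', 1)
condwrite(verbose, 'Include rules changed: %d\n', 0)
# With verbose = False, nothing is printed -- the quiet default.

The field names passed to ``fm.condwrite`` (``profiles_added``, ``include_rules_added``, ``files_dropped``, and so on) are also what templated output keys off, which is why they differ from the human-readable messages.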