repository: introduce constant for internal phase repo requirement and use it...
Pulkit Goyal
r45915:f025b97f default
@@ -1,1978 +1,1982 @@
# repository.py - Interfaces and base classes for repositories and peers.
#
# Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from ..i18n import _
from .. import error
from . import util as interfaceutil

# When narrowing is finalized and no longer subject to format changes,
# we should move this to just "narrow" or similar.
NARROW_REQUIREMENT = b'narrowhg-experimental'

# Enables sparse working directory usage
SPARSE_REQUIREMENT = b'exp-sparse'

# Enables the internal phase which is used to hide changesets instead
# of stripping them
INTERNAL_PHASE_REQUIREMENT = b'internal-phase'

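As a quick illustration of how these requirement constants get consumed
(a sketch only; ``repo`` is a hypothetical local repository object whose
``requirements`` set Mercurial populates from ``.hg/requires``):

    # sketch: gate behavior on the new requirement
    if INTERNAL_PHASE_REQUIREMENT in repo.requirements:
        # this repo hides internal changesets via the internal phase
        # rather than stripping them
        supports_internal_phase = True
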
# Local repository feature string.

# Revlogs are being used for file storage.
REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
# The storage part of the repository is shared from an external source.
REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
# LFS supported for backing file storage.
REPO_FEATURE_LFS = b'lfs'
# Repository supports being stream cloned.
REPO_FEATURE_STREAM_CLONE = b'streamclone'
# Files storage may lack data for all ancestors.
REPO_FEATURE_SHALLOW_FILE_STORAGE = b'shallowfilestorage'

REVISION_FLAG_CENSORED = 1 << 15
REVISION_FLAG_ELLIPSIS = 1 << 14
REVISION_FLAG_EXTSTORED = 1 << 13
REVISION_FLAG_SIDEDATA = 1 << 12

REVISION_FLAGS_KNOWN = (
    REVISION_FLAG_CENSORED
    | REVISION_FLAG_ELLIPSIS
    | REVISION_FLAG_EXTSTORED
    | REVISION_FLAG_SIDEDATA
)

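Since these flags form a bitfield, consumers test and validate them with
bitwise operations, as in this sketch (``flags`` would come from an
``irevisiondelta`` or an index entry):

    # reject revisions carrying bits this code does not understand
    if flags & ~REVISION_FLAGS_KNOWN:
        raise ValueError('unknown revision flags set')
    # test for one specific flag
    is_censored = bool(flags & REVISION_FLAG_CENSORED)
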
CG_DELTAMODE_STD = b'default'
CG_DELTAMODE_PREV = b'previous'
CG_DELTAMODE_FULL = b'fulltext'
CG_DELTAMODE_P1 = b'p1'

class ipeerconnection(interfaceutil.Interface):
    """Represents a "connection" to a repository.

    This is the base interface for representing a connection to a repository.
    It holds basic properties and methods applicable to all peer types.

    This is not a complete interface definition and should not be used
    outside of this module.
    """

    ui = interfaceutil.Attribute("""ui.ui instance""")

    def url():
        """Returns a URL string representing this peer.

        Currently, implementations expose the raw URL used to construct the
        instance. It may contain credentials as part of the URL. The
        expectations of the value aren't well-defined and this could lead to
        data leakage.

        TODO audit/clean consumers and more clearly define the contents of this
        value.
        """

    def local():
        """Returns a local repository instance.

        If the peer represents a local repository, returns an object that
        can be used to interface with it. Otherwise returns ``None``.
        """

    def peer():
        """Returns an object conforming to this interface.

        Most implementations will ``return self``.
        """

    def canpush():
        """Returns a boolean indicating if this peer can be pushed to."""

    def close():
        """Close the connection to this peer.

        This is called when the peer will no longer be used. Resources
        associated with the peer should be cleaned up.
        """

class ipeercapabilities(interfaceutil.Interface):
    """Peer sub-interface related to capabilities."""

    def capable(name):
        """Determine support for a named capability.

        Returns ``False`` if capability not supported.

        Returns ``True`` if boolean capability is supported. Returns a string
        if capability support is non-boolean.

        Capability strings may or may not map to wire protocol capabilities.
        """

    def requirecap(name, purpose):
        """Require a capability to be present.

        Raises a ``CapabilityError`` if the capability isn't present.
        """

class ipeercommands(interfaceutil.Interface):
    """Client-side interface for communicating over the wire protocol.

    This interface is used as a gateway to the Mercurial wire protocol.
    Methods commonly call wire protocol commands of the same name.
    """

    def branchmap():
        """Obtain heads in named branches.

        Returns a dict mapping branch name to an iterable of nodes that are
        heads on that branch.
        """

    def capabilities():
        """Obtain capabilities of the peer.

        Returns a set of string capabilities.
        """

    def clonebundles():
        """Obtains the clone bundles manifest for the repo.

        Returns the manifest as unparsed bytes.
        """

    def debugwireargs(one, two, three=None, four=None, five=None):
        """Used to facilitate debugging of arguments passed over the wire."""

    def getbundle(source, **kwargs):
        """Obtain remote repository data as a bundle.

        This command is how the bulk of repository data is transferred from
        the peer to the local repository.

        Returns a generator of bundle data.
        """

    def heads():
        """Determine all known head revisions in the peer.

        Returns an iterable of binary nodes.
        """

    def known(nodes):
        """Determine whether multiple nodes are known.

        Accepts an iterable of nodes whose presence to check for.

        Returns an iterable of booleans indicating whether the corresponding
        node at that index is known to the peer.
        """

    def listkeys(namespace):
        """Obtain all keys in a pushkey namespace.

        Returns an iterable of key names.
        """

    def lookup(key):
        """Resolve a value to a known revision.

        Returns a binary node of the resolved revision on success.
        """

    def pushkey(namespace, key, old, new):
        """Set a value using the ``pushkey`` protocol.

        Arguments correspond to the pushkey namespace and key to operate on and
        the old and new values for that key.

        Returns a string with the peer result. The value inside varies by the
        namespace.
        """

    def stream_out():
        """Obtain streaming clone data.

        Successful result should be a generator of data chunks.
        """

    def unbundle(bundle, heads, url):
        """Transfer repository data to the peer.

        This is how the bulk of data during a push is transferred.

        Returns the integer number of heads added to the peer.
        """

class ipeerlegacycommands(interfaceutil.Interface):
    """Interface for implementing support for legacy wire protocol commands.

    Wire protocol commands transition to legacy status when they are no longer
    used by modern clients. To facilitate identifying which commands are
    legacy, the interfaces are split.
    """

    def between(pairs):
        """Obtain nodes between pairs of nodes.

        ``pairs`` is an iterable of node pairs.

        Returns an iterable of iterables of nodes corresponding to each
        requested pair.
        """

    def branches(nodes):
        """Obtain ancestor changesets of specific nodes back to a branch point.

        For each requested node, the peer finds the first ancestor node that is
        a DAG root or is a merge.

        Returns an iterable of iterables with the resolved values for each node.
        """

    def changegroup(nodes, source):
        """Obtain a changegroup with data for descendants of specified nodes."""

    def changegroupsubset(bases, heads, source):
        pass

class ipeercommandexecutor(interfaceutil.Interface):
    """Represents a mechanism to execute remote commands.

    This is the primary interface for requesting that wire protocol commands
    be executed. Instances of this interface are active in a context manager
    and have a well-defined lifetime. When the context manager exits, all
    outstanding requests are waited on.
    """

    def callcommand(name, args):
        """Request that a named command be executed.

        Receives the command name and a dictionary of command arguments.

        Returns a ``concurrent.futures.Future`` that will resolve to the
        result of that command request. That exact value is left up to
        the implementation and possibly varies by command.

        Not all commands can coexist with other commands in an executor
        instance: it depends on the underlying wire protocol transport being
        used and the command itself.

        Implementations MAY call ``sendcommands()`` automatically if the
        requested command can not coexist with other commands in this executor.

        Implementations MAY call ``sendcommands()`` automatically when the
        future's ``result()`` is called. So, consumers using multiple
        commands with an executor MUST ensure that ``result()`` is not called
        until all command requests have been issued.
        """

    def sendcommands():
        """Trigger submission of queued command requests.

        Not all transports submit commands as soon as they are requested to
        run. When called, this method forces queued command requests to be
        issued. It will no-op if all commands have already been sent.

        When called, no more new commands may be issued with this executor.
        """

    def close():
        """Signal that this command request is finished.

        When called, no more new commands may be issued. All outstanding
        commands that have previously been issued are waited on before
        returning. This not only includes waiting for the futures to resolve,
        but also waiting for all response data to arrive. In other words,
        calling this waits for all on-wire state for issued command requests
        to finish.

        When used as a context manager, this method is called when exiting the
        context manager.

        This method may call ``sendcommands()`` if there are buffered commands.
        """

class ipeerrequests(interfaceutil.Interface):
    """Interface for executing commands on a peer."""

    limitedarguments = interfaceutil.Attribute(
        """True if the peer cannot receive large argument values for commands."""
    )

    def commandexecutor():
        """A context manager that resolves to an ipeercommandexecutor.

        The object this resolves to can be used to issue command requests
        to the peer.

        Callers should call its ``callcommand`` method to issue command
        requests.

        A new executor should be obtained for each distinct set of commands
        (possibly just a single command) that the consumer wants to execute
        as part of a single operation or round trip. This is because some
        peers are half-duplex and/or don't support persistent connections.
        e.g. in the case of HTTP peers, commands sent to an executor represent
        a single HTTP request. While some peers may support multiple command
        sends over the wire per executor, consumers need to code to the least
        capable peer. So it should be assumed that command executors buffer
        called commands until they are told to send them and that each
        command executor could result in a new connection or wire-level request
        being issued.
        """

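A canonical consumer of ``commandexecutor()`` looks like the following
sketch (``remote`` is a hypothetical peer; note that both commands are
issued before any ``result()`` call, per the contract above):

    with remote.commandexecutor() as e:
        f_heads = e.callcommand(b'heads', {})
        f_phases = e.callcommand(b'listkeys', {b'namespace': b'phases'})
        heads = f_heads.result()
        phases = f_phases.result()
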
class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
    """Unified interface for peer repositories.

    All peer instances must conform to this interface.
    """


class ipeerv2(ipeerconnection, ipeercapabilities, ipeerrequests):
    """Unified peer interface for wire protocol version 2 peers."""

    apidescriptor = interfaceutil.Attribute(
        """Data structure holding description of server API."""
    )


@interfaceutil.implementer(ipeerbase)
class peer(object):
    """Base class for peer repositories."""

    limitedarguments = False

    def capable(self, name):
        caps = self.capabilities()
        if name in caps:
            return True

        name = b'%s=' % name
        for cap in caps:
            if cap.startswith(name):
                return cap[len(name) :]

        return False

    def requirecap(self, name, purpose):
        if self.capable(name):
            return

        raise error.CapabilityError(
            _(
                b'cannot %s; remote repository does not support the '
                b'\'%s\' capability'
            )
            % (purpose, name)
        )

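The concrete ``capable()`` above distinguishes boolean capabilities from
value-carrying ``name=value`` capabilities. A sketch of the behavior with
a hypothetical subclass:

    class dummypeer(peer):
        def capabilities(self):
            return {b'lookup', b'bundle2=HG20'}

    p = dummypeer()
    p.capable(b'lookup')   # True: boolean capability is present
    p.capable(b'bundle2')  # b'HG20': the value after 'bundle2='
    p.capable(b'unknown')  # False: not advertised
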
class iverifyproblem(interfaceutil.Interface):
    """Represents a problem with the integrity of the repository.

    Instances of this interface are emitted to describe an integrity issue
    with a repository (e.g. corrupt storage, missing data, etc).

    Instances are essentially messages associated with severity.
    """

    warning = interfaceutil.Attribute(
        """Message indicating a non-fatal problem."""
    )

    error = interfaceutil.Attribute("""Message indicating a fatal problem.""")

    node = interfaceutil.Attribute(
        """Revision encountering the problem.

        ``None`` means the problem doesn't apply to a single revision.
        """
    )

class irevisiondelta(interfaceutil.Interface):
    """Represents a delta between one revision and another.

    Instances convey enough information to allow a revision to be exchanged
    with another repository.

    Instances represent the fulltext revision data or a delta against
    another revision. Therefore the ``revision`` and ``delta`` attributes
    are mutually exclusive.

    Typically used for changegroup generation.
    """

    node = interfaceutil.Attribute("""20 byte node of this revision.""")

    p1node = interfaceutil.Attribute(
        """20 byte node of 1st parent of this revision."""
    )

    p2node = interfaceutil.Attribute(
        """20 byte node of 2nd parent of this revision."""
    )

    linknode = interfaceutil.Attribute(
        """20 byte node of the changelog revision this node is linked to."""
    )

    flags = interfaceutil.Attribute(
        """2 bytes of integer flags that apply to this revision.

        This is a bitwise composition of the ``REVISION_FLAG_*`` constants.
        """
    )

    basenode = interfaceutil.Attribute(
        """20 byte node of the revision this data is a delta against.

        ``nullid`` indicates that the revision is a full revision and not
        a delta.
        """
    )

    baserevisionsize = interfaceutil.Attribute(
        """Size of base revision this delta is against.

        May be ``None`` if ``basenode`` is ``nullid``.
        """
    )

    revision = interfaceutil.Attribute(
        """Raw fulltext of revision data for this node."""
    )

    delta = interfaceutil.Attribute(
        """Delta between ``basenode`` and ``node``.

        Stored in the bdiff delta format.
        """
    )

class ifilerevisionssequence(interfaceutil.Interface):
    """Contains index data for all revisions of a file.

    Types implementing this behave like lists of tuples. The index
    in the list corresponds to the revision number. The values contain
    index metadata.

    The *null* revision (revision number -1) is always the last item
    in the index.
    """

    def __len__():
        """The total number of revisions."""

    def __getitem__(rev):
        """Returns the object having a specific revision number.

        Returns an 8-tuple with the following fields:

        offset+flags
           Contains the offset and flags for the revision. 64-bit unsigned
           integer where first 6 bytes are the offset and the next 2 bytes
           are flags. The offset can be 0 if it is not used by the store.
        compressed size
           Size of the revision data in the store. It can be 0 if it isn't
           needed by the store.
        uncompressed size
           Fulltext size. It can be 0 if it isn't needed by the store.
        base revision
           Revision number of revision the delta for storage is encoded
           against. -1 indicates not encoded against a base revision.
        link revision
           Revision number of changelog revision this entry is related to.
        p1 revision
           Revision number of 1st parent. -1 if no 1st parent.
        p2 revision
           Revision number of 2nd parent. -1 if no 2nd parent.
        node
           Binary node value for this revision number.

        Negative values should index off the end of the sequence. ``-1``
        should return the null revision. ``-2`` should return the most
        recent revision.
        """

    def __contains__(rev):
        """Whether a revision number exists."""

    def insert(self, i, entry):
        """Add an item to the index at specific revision."""

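The packed ``offset+flags`` field above follows the revlog convention of
a 48-bit offset in the high bytes and 16 flag bits in the low bytes. A
sketch of unpacking it (hypothetical helper, not part of the interface):

    def unpack_offset_flags(offset_flags):
        # high 48 bits: byte offset into storage (may be 0)
        # low 16 bits: REVISION_FLAG_* bits
        return offset_flags >> 16, offset_flags & 0xFFFF
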
class ifileindex(interfaceutil.Interface):
    """Storage interface for index data of a single file.

    File storage data is divided into index metadata and data storage.
    This interface defines the index portion of the interface.

    The index logically consists of:

    * A mapping between revision numbers and nodes.
    * DAG data (storing and querying the relationship between nodes).
    * Metadata to facilitate storage.
    """

    def __len__():
        """Obtain the number of revisions stored for this file."""

    def __iter__():
        """Iterate over revision numbers for this file."""

    def hasnode(node):
        """Returns a bool indicating if a node is known to this store.

        Implementations must only return True for full, binary node values:
        hex nodes, revision numbers, and partial node matches must be
        rejected.

        The null node is never present.
        """

    def revs(start=0, stop=None):
        """Iterate over revision numbers for this file, with control."""

    def parents(node):
        """Returns a 2-tuple of parent nodes for a revision.

        Values will be ``nullid`` if the parent is empty.
        """

    def parentrevs(rev):
        """Like parents() but operates on revision numbers."""

    def rev(node):
        """Obtain the revision number given a node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def node(rev):
        """Obtain the node value given a revision number.

        Raises ``IndexError`` if the node is not known.
        """

    def lookup(node):
        """Attempt to resolve a value to a node.

        Value can be a binary node, hex node, revision number, or a string
        that can be converted to an integer.

        Raises ``error.LookupError`` if a node could not be resolved.
        """

    def linkrev(rev):
        """Obtain the changeset revision number a revision is linked to."""

    def iscensored(rev):
        """Return whether a revision's content has been censored."""

    def commonancestorsheads(node1, node2):
        """Obtain an iterable of nodes containing heads of common ancestors.

        See ``ancestor.commonancestorsheads()``.
        """

    def descendants(revs):
        """Obtain descendant revision numbers for a set of revision numbers.

        If ``nullrev`` is in the set, this is equivalent to ``revs()``.
        """

    def heads(start=None, stop=None):
        """Obtain a list of nodes that are DAG heads, with control.

        The set of revisions examined can be limited by specifying
        ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
        iterable of nodes. DAG traversal starts at earlier revision
        ``start`` and iterates forward until any node in ``stop`` is
        encountered.
        """

    def children(node):
        """Obtain nodes that are children of a node.

        Returns a list of nodes.
        """

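Typical traversal against this index interface looks like the following
sketch (``fl`` is a hypothetical ``ifileindex`` provider, e.g. the object
returned by ``repo.file(b'path')`` in a revlog-backed repository):

    for rev in fl.revs():
        node = fl.node(rev)
        p1, p2 = fl.parents(node)
        ctx_rev = fl.linkrev(rev)  # changelog revision this links to
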
class ifiledata(interfaceutil.Interface):
    """Storage interface for data storage of a specific file.

    This complements ``ifileindex`` and provides an interface for accessing
    data for a tracked file.
    """

    def size(rev):
        """Obtain the fulltext size of file data.

        Any metadata is excluded from size measurements.
        """

    def revision(node, raw=False):
        """Obtain fulltext data for a node.

        By default, any storage transformations are applied before the data
        is returned. If ``raw`` is True, non-raw storage transformations
        are not applied.

        The fulltext data may contain a header containing metadata. Most
        consumers should use ``read()`` to obtain the actual file data.
        """

    def rawdata(node):
        """Obtain raw data for a node."""

    def read(node):
        """Resolve file fulltext data.

        This is similar to ``revision()`` except any metadata in the data
        headers is stripped.
        """

    def renamed(node):
        """Obtain copy metadata for a node.

        Returns ``False`` if no copy metadata is stored or a 2-tuple of
        (path, node) from which this revision was copied.
        """

    def cmp(node, fulltext):
        """Compare fulltext to another revision.

        Returns True if the fulltext is different from what is stored.

        This takes copy metadata into account.

        TODO better document the copy metadata and censoring logic.
        """

    def emitrevisions(
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
        deltamode=CG_DELTAMODE_STD,
    ):
        """Produce ``irevisiondelta`` for revisions.

        Given an iterable of nodes, emits objects conforming to the
        ``irevisiondelta`` interface that describe revisions in storage.

        This method is a generator.

        The input nodes may be unordered. Implementations must ensure that a
        node's parents are emitted before the node itself. Transitively, this
        means that a node may only be emitted once all its ancestors in
        ``nodes`` have also been emitted.

        By default, emits "index" data (the ``node``, ``p1node``, and
        ``p2node`` attributes). If ``revisiondata`` is set, revision data
        will also be present on the emitted objects.

        With default argument values, implementations can choose to emit
        either fulltext revision data or a delta. When emitting deltas,
        implementations must consider whether the delta's base revision
        fulltext is available to the receiver.

        The base revision fulltext is guaranteed to be available if any of
        the following are met:

        * Its fulltext revision was emitted by this method call.
        * A delta for that revision was emitted by this method call.
        * ``assumehaveparentrevisions`` is True and the base revision is a
          parent of the node.

        ``nodesorder`` can be used to control the order that revisions are
        emitted. By default, revisions can be reordered as long as they are
        in DAG topological order (see above). If the value is ``nodes``,
        the iteration order from ``nodes`` should be used. If the value is
        ``storage``, then the native order from the backing storage layer
        is used. (Not all storage layers will have strong ordering and behavior
        of this mode is storage-dependent.) ``nodes`` ordering can force
        revisions to be emitted before their ancestors, so consumers should
        use it with care.

        The ``linknode`` attribute on the returned ``irevisiondelta`` may not
        be set and it is the caller's responsibility to resolve it, if needed.

        If ``deltamode`` is CG_DELTAMODE_PREV and revision data is requested,
        all revision data should be emitted as deltas against the revision
        emitted just prior. The initial revision should be a delta against its
        1st parent.
        """

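A consumer of ``emitrevisions()`` typically branches on whether each
emitted object carries a fulltext or a delta, as in this sketch (``fl``,
``nodes``, and ``apply_delta`` are hypothetical):

    for rev in fl.emitrevisions(nodes, revisiondata=True):
        if rev.revision is not None:
            data = rev.revision  # full snapshot
        else:
            # rev.delta holds bdiff data against rev.basenode, which
            # the guarantees above make resolvable by the receiver
            data = apply_delta(rev.basenode, rev.delta)
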
class ifilemutation(interfaceutil.Interface):
    """Storage interface for mutation events of a tracked file."""

    def add(filedata, meta, transaction, linkrev, p1, p2):
        """Add a new revision to the store.

        Takes file data, dictionary of metadata, a transaction, linkrev,
        and parent nodes.

        Returns the node that was added.

        May no-op if a revision matching the supplied data is already stored.
        """

    def addrevision(
        revisiondata,
        transaction,
        linkrev,
        p1,
        p2,
        node=None,
        flags=0,
        cachedelta=None,
    ):
        """Add a new revision to the store.

        This is similar to ``add()`` except it operates at a lower level.

        The data passed in already contains a metadata header, if any.

        ``node`` and ``flags`` can be used to define the expected node and
        the flags to use with storage. ``flags`` is a bitwise value composed
        of the various ``REVISION_FLAG_*`` constants.

        ``add()`` is usually called when adding files from e.g. the working
        directory. ``addrevision()`` is often called by ``add()`` and for
        scenarios where revision data has already been computed, such as when
        applying raw data from a peer repo.
        """

    def addgroup(
        deltas,
        linkmapper,
        transaction,
        addrevisioncb=None,
        maybemissingparents=False,
    ):
        """Process a series of deltas for storage.

        ``deltas`` is an iterable of 7-tuples of
        (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
        to add.

        The ``delta`` field contains ``mpatch`` data to apply to a base
        revision, identified by ``deltabase``. The base node can be
        ``nullid``, in which case the header from the delta can be ignored
        and the delta used as the fulltext.

        ``addrevisioncb`` should be called for each node as it is committed.

        ``maybemissingparents`` is a bool indicating whether the incoming
        data may reference parents/ancestor revisions that aren't present.
        This flag is set when receiving data into a "shallow" store that
        doesn't hold all history.

        Returns a list of nodes that were processed. A node will be in the list
        even if it existed in the store previously.
        """

    def censorrevision(tr, node, tombstone=b''):
        """Remove the content of a single revision.

        The specified ``node`` will have its content purged from storage.
        Future attempts to access the revision data for this node will
        result in failure.

        A ``tombstone`` message can optionally be stored. This message may be
        displayed to users when they attempt to access the missing revision
        data.

        Storage backends may have stored deltas against the previous content
        in this revision. As part of censoring a revision, these storage
        backends are expected to rewrite any internally stored deltas such
        that they no longer reference the deleted content.
        """

    def getstrippoint(minlink):
        """Find the minimum revision that must be stripped to strip a linkrev.

        Returns a 2-tuple containing the minimum revision number and a set
        of all revision numbers that would be broken by this strip.

        TODO this is highly revlog centric and should be abstracted into
        a higher-level deletion API. ``repair.strip()`` relies on this.
        """

    def strip(minlink, transaction):
        """Remove storage of items starting at a linkrev.

        This uses ``getstrippoint()`` to determine the first node to remove.
        Then it effectively truncates storage for all revisions after that.

        TODO this is highly revlog centric and should be abstracted into a
        higher-level deletion API.
        """

class ifilestorage(ifileindex, ifiledata, ifilemutation):
    """Complete storage interface for a single tracked file."""

    def files():
        """Obtain paths that are backing storage for this file.

        TODO this is used heavily by verify code and there should probably
        be a better API for that.
        """

    def storageinfo(
        exclusivefiles=False,
        sharedfiles=False,
        revisionscount=False,
        trackedsize=False,
        storedsize=False,
    ):
        """Obtain information about storage for this file's data.

        Returns a dict describing storage for this tracked path. The keys
        in the dict map to arguments of the same name. The arguments are
        bools indicating whether to calculate and obtain that data.

        exclusivefiles
           Iterable of (vfs, path) describing files that are exclusively
           used to back storage for this tracked path.

        sharedfiles
           Iterable of (vfs, path) describing files that are used to back
           storage for this tracked path. Those files may also provide storage
           for other stored entities.

        revisionscount
           Number of revisions available for retrieval.

        trackedsize
           Total size in bytes of all tracked revisions. This is a sum of the
           length of the fulltext of all revisions.

        storedsize
           Total size in bytes used to store data for all tracked revisions.
           This is commonly less than ``trackedsize`` due to internal usage
           of deltas rather than fulltext revisions.

        Not all storage backends may support all queries or have a reasonable
        value to use. In that case, the value should be set to ``None`` and
        callers are expected to handle this special value.
        """

    def verifyintegrity(state):
        """Verifies the integrity of file storage.

        ``state`` is a dict holding state of the verifier process. It can be
        used to communicate data between invocations of multiple storage
        primitives.

        If individual revisions cannot have their revision content resolved,
        the method is expected to set the ``skipread`` key to a set of nodes
        that encountered problems. If set, the method can also add the node(s)
        to ``safe_renamed`` in order to indicate nodes that may perform the
        rename checks with currently accessible data.

        The method yields objects conforming to the ``iverifyproblem``
        interface.
        """


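# Illustrative sketch (not part of the interface itself): how a caller might
# combine ``ifilestorage.storageinfo()`` with the ``None`` convention
# documented above. ``fl`` is assumed to be any object conforming to
# ``ifilestorage``; byte-string keys mirroring the argument names follow this
# code base's conventions.
def _example_storage_report(fl):
    info = fl.storageinfo(revisionscount=True, trackedsize=True)
    revcount = info[b'revisionscount']
    if revcount is None:
        # The backend declined to answer cheaply; fall back to the
        # ``__len__`` provided by ``ifileindex``.
        revcount = len(fl)
    return revcount, info[b'trackedsize']

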
class idirs(interfaceutil.Interface):
    """Interface representing a collection of directories from paths.

    This interface is essentially a derived data structure representing
    directories from a collection of paths.
    """

    def addpath(path):
        """Add a path to the collection.

        All directories in the path will be added to the collection.
        """

    def delpath(path):
        """Remove a path from the collection.

        If the removal was the last path in a particular directory, the
        directory is removed from the collection.
        """

    def __iter__():
        """Iterate over the directories in this collection of paths."""

    def __contains__(path):
        """Whether a specific directory is in this collection."""


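# A minimal reference-counted sketch of the ``idirs`` contract above; this is
# an illustration, not Mercurial's real implementation. Each directory tracks
# how many paths live beneath it so ``delpath()`` only drops a directory when
# the last such path is removed.
class _exampledirs(object):
    def __init__(self):
        self._refs = {}  # directory -> number of paths beneath it

    def _dirsof(self, path):
        pos = path.rfind(b'/')
        while pos != -1:
            yield path[:pos]
            pos = path.rfind(b'/', 0, pos)
        yield b''  # the root directory

    def addpath(self, path):
        for d in self._dirsof(path):
            self._refs[d] = self._refs.get(d, 0) + 1

    def delpath(self, path):
        for d in self._dirsof(path):
            if self._refs[d] == 1:
                del self._refs[d]
            else:
                self._refs[d] -= 1

    def __iter__(self):
        return iter(self._refs)

    def __contains__(self, d):
        return d in self._refs

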
class imanifestdict(interfaceutil.Interface):
    """Interface representing a manifest data structure.

    A manifest is effectively a dict mapping paths to entries. Each entry
    consists of a binary node and extra flags affecting that entry.
    """

    def __getitem__(path):
        """Returns the binary node value for a path in the manifest.

        Raises ``KeyError`` if the path does not exist in the manifest.

        Equivalent to ``self.find(path)[0]``.
        """

    def find(path):
        """Returns the entry for a path in the manifest.

        Returns a 2-tuple of (node, flags).

        Raises ``KeyError`` if the path does not exist in the manifest.
        """

    def __len__():
        """Return the number of entries in the manifest."""

    def __nonzero__():
        """Returns True if the manifest has entries, False otherwise."""

    __bool__ = __nonzero__

    def __setitem__(path, node):
        """Define the node value for a path in the manifest.

        If the path is already in the manifest, its flags will be copied to
        the new entry.
        """

    def __contains__(path):
        """Whether a path exists in the manifest."""

    def __delitem__(path):
        """Remove a path from the manifest.

        Raises ``KeyError`` if the path is not in the manifest.
        """

    def __iter__():
        """Iterate over paths in the manifest."""

    def iterkeys():
        """Iterate over paths in the manifest."""

    def keys():
        """Obtain a list of paths in the manifest."""

    def filesnotin(other, match=None):
        """Obtain the set of paths in this manifest but not in another.

        ``match`` is an optional matcher function to be applied to both
        manifests.

        Returns a set of paths.
        """

    def dirs():
        """Returns an object implementing the ``idirs`` interface."""

    def hasdir(dir):
        """Returns a bool indicating if a directory is in this manifest."""

    def walk(match):
        """Generator of paths in manifest satisfying a matcher.

        If the matcher has explicit files listed and they don't exist in
        the manifest, ``match.bad()`` is called for each missing file.
        """

    def diff(other, match=None, clean=False):
        """Find differences between this manifest and another.

        This manifest is compared to ``other``.

        If ``match`` is provided, the two manifests are filtered against this
        matcher and only entries satisfying the matcher are compared.

        If ``clean`` is True, unchanged files are included in the returned
        object.

        Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
        the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
        represents the node and flags for this manifest and ``(node2, flag2)``
        are the same for the other manifest.
        """

    def setflag(path, flag):
        """Set the flag value for a given path.

        Raises ``KeyError`` if the path is not already in the manifest.
        """

    def get(path, default=None):
        """Obtain the node value for a path or a default value if missing."""

    def flags(path):
        """Return the flags value for a path (default: empty bytestring)."""

    def copy():
        """Return a copy of this manifest."""

    def items():
        """Returns an iterable of (path, node) for items in this manifest."""

    def iteritems():
        """Identical to items()."""

    def iterentries():
        """Returns an iterable of (path, node, flags) for this manifest.

        Similar to ``iteritems()`` except items are a 3-tuple and include
        flags.
        """

    def text():
        """Obtain the raw data representation for this manifest.

        Result is used to create a manifest revision.
        """

    def fastdelta(base, changes):
        """Obtain a delta between this manifest and another given changes.

        ``base`` is the raw data representation for another manifest.

        ``changes`` is an iterable of ``(path, to_delete)``.

        Returns a 2-tuple containing ``bytearray(self.text())`` and the
        delta between ``base`` and this manifest.

        If this manifest implementation can't support ``fastdelta()``,
        raise ``mercurial.manifest.FastdeltaUnavailable``.
        """


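# Sketch of consuming the ``imanifestdict.diff()`` structure documented
# above. ``m1`` and ``m2`` are assumed to conform to ``imanifestdict``; the
# treatment of a missing side as a ``None`` node matches Mercurial's manifest
# implementation but is an assumption as far as this interface is concerned.
def _example_summarizediff(m1, m2):
    added, removed, modified = [], [], []
    for path, ((node1, _flag1), (node2, _flag2)) in m1.diff(m2).items():
        if node1 is None:
            added.append(path)  # only present in the other manifest
        elif node2 is None:
            removed.append(path)  # only present in this manifest
        else:
            modified.append(path)  # present in both, but different
    return added, removed, modified

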
class imanifestrevisionbase(interfaceutil.Interface):
    """Base interface representing a single revision of a manifest.

    Should not be used as a primary interface: should always be inherited
    as part of a larger interface.
    """

    def copy():
        """Obtain a copy of this manifest instance.

        Returns an object conforming to the ``imanifestrevisionwritable``
        interface. The instance will be associated with the same
        ``imanifestlog`` collection as this instance.
        """

    def read():
        """Obtain the parsed manifest data structure.

        The returned object conforms to the ``imanifestdict`` interface.
        """


class imanifestrevisionstored(imanifestrevisionbase):
    """Interface representing a manifest revision committed to storage."""

    def node():
        """The binary node for this manifest."""

    parents = interfaceutil.Attribute(
        """List of binary nodes that are parents for this manifest revision."""
    )

    def readdelta(shallow=False):
        """Obtain the manifest data structure representing changes from parent.

        This manifest is compared to its 1st parent. A new manifest representing
        those differences is constructed.

        The returned object conforms to the ``imanifestdict`` interface.
        """

    def readfast(shallow=False):
        """Calls either ``read()`` or ``readdelta()``.

        The faster of the two options is called.
        """

    def find(key):
        """Calls ``self.read().find(key)``.

        Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
        """


class imanifestrevisionwritable(imanifestrevisionbase):
    """Interface representing a manifest revision that can be committed."""

    def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
        """Add this revision to storage.

        Takes a transaction object, the changeset revision number it will
        be associated with, its parent nodes, and lists of added and
        removed paths.

        If match is provided, storage can choose not to inspect or write out
        items that do not match. Storage is still required to be able to provide
        the full manifest in the future for any directories written (these
        manifests should not be "narrowed on disk").

        Returns the binary node of the created revision.
        """


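# Hedged sketch of the copy-then-write flow implied by the three interfaces
# above: stored revisions are read-only, ``copy()`` yields a writable
# instance, and ``write()`` persists it. ``mctx`` is assumed to conform to
# ``imanifestrevisionstored``; the transaction, linkrev, parent nodes, and
# the 20-byte file node are placeholders supplied by surrounding machinery.
def _example_rewritemanifest(mctx, tr, linkrev, p1node, p2node, filenode):
    wcopy = mctx.copy()  # conforms to imanifestrevisionwritable
    m = wcopy.read()  # conforms to imanifestdict
    m[b'new/file.txt'] = filenode  # hypothetical path
    return wcopy.write(
        tr, linkrev, p1node, p2node, added=[b'new/file.txt'], removed=[]
    )

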
class imanifeststorage(interfaceutil.Interface):
    """Storage interface for manifest data."""

    tree = interfaceutil.Attribute(
        """The path to the directory this manifest tracks.

        The empty bytestring represents the root manifest.
        """
    )

    index = interfaceutil.Attribute(
        """An ``ifilerevisionssequence`` instance."""
    )

    indexfile = interfaceutil.Attribute(
        """Path of revlog index file.

        TODO this is revlog specific and should not be exposed.
        """
    )

    opener = interfaceutil.Attribute(
        """VFS opener to use to access underlying files used for storage.

        TODO this is revlog specific and should not be exposed.
        """
    )

    version = interfaceutil.Attribute(
        """Revlog version number.

        TODO this is revlog specific and should not be exposed.
        """
    )

    _generaldelta = interfaceutil.Attribute(
        """Whether generaldelta storage is being used.

        TODO this is revlog specific and should not be exposed.
        """
    )

    fulltextcache = interfaceutil.Attribute(
        """Dict with cache of fulltexts.

        TODO this doesn't feel appropriate for the storage interface.
        """
    )

    def __len__():
        """Obtain the number of revisions stored for this manifest."""

    def __iter__():
        """Iterate over revision numbers for this manifest."""

    def rev(node):
        """Obtain the revision number given a binary node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def node(rev):
        """Obtain the node value given a revision number.

        Raises ``error.LookupError`` if the revision is not known.
        """

    def lookup(value):
        """Attempt to resolve a value to a node.

        Value can be a binary node, hex node, revision number, or a bytes
        that can be converted to an integer.

        Raises ``error.LookupError`` if a node could not be resolved.
        """

    def parents(node):
        """Returns a 2-tuple of parent nodes for a node.

        Values will be ``nullid`` if the parent is empty.
        """

    def parentrevs(rev):
        """Like parents() but operates on revision numbers."""

    def linkrev(rev):
        """Obtain the changeset revision number a revision is linked to."""

    def revision(node, _df=None, raw=False):
        """Obtain fulltext data for a node."""

    def rawdata(node, _df=None):
        """Obtain raw data for a node."""

    def revdiff(rev1, rev2):
        """Obtain a delta between two revision numbers.

        The returned data is the result of ``bdiff.bdiff()`` on the raw
        revision data.
        """

    def cmp(node, fulltext):
        """Compare fulltext to another revision.

        Returns True if the fulltext is different from what is stored.
        """

    def emitrevisions(
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
    ):
        """Produce ``irevisiondelta`` describing revisions.

        See the documentation for ``ifiledata`` for more.
        """

    def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
        """Process a series of deltas for storage.

        See the documentation in ``ifilemutation`` for more.
        """

    def rawsize(rev):
        """Obtain the size of tracked data.

        Is equivalent to ``len(m.rawdata(node))``.

        TODO this method is only used by upgrade code and may be removed.
        """

    def getstrippoint(minlink):
        """Find minimum revision that must be stripped to strip a linkrev.

        See the documentation in ``ifilemutation`` for more.
        """

    def strip(minlink, transaction):
        """Remove storage of items starting at a linkrev.

        See the documentation in ``ifilemutation`` for more.
        """

    def checksize():
        """Obtain the expected sizes of backing files.

        TODO this is used by verify and it should not be part of the interface.
        """

    def files():
        """Obtain paths that are backing storage for this manifest.

        TODO this is used by verify and there should probably be a better API
        for this functionality.
        """

    def deltaparent(rev):
        """Obtain the revision that a revision is delta'd against.

        TODO delta encoding is an implementation detail of storage and should
        not be exposed to the storage interface.
        """

    def clone(tr, dest, **kwargs):
        """Clone this instance to another."""

    def clearcaches(clear_persisted_data=False):
        """Clear any caches associated with this instance."""

    def dirlog(d):
        """Obtain a manifest storage instance for a tree."""

    def add(
        m, transaction, link, p1, p2, added, removed, readtree=None, match=None
    ):
        """Add a revision to storage.

        ``m`` is an object conforming to ``imanifestdict``.

        ``link`` is the linkrev revision number.

        ``p1`` and ``p2`` are the parent revision numbers.

        ``added`` and ``removed`` are iterables of added and removed paths,
        respectively.

        ``readtree`` is a function that can be used to read the child tree(s)
        when recursively writing the full tree structure when using
        treemanifests.

        ``match`` is a matcher that can be used to hint to storage that not all
        paths must be inspected; this is an optimization and can be safely
        ignored. Note that the storage must still be able to reproduce a full
        manifest including files that did not match.
        """

    def storageinfo(
        exclusivefiles=False,
        sharedfiles=False,
        revisionscount=False,
        trackedsize=False,
        storedsize=False,
    ):
        """Obtain information about storage for this manifest's data.

        See ``ifilestorage.storageinfo()`` for a description of this method.
        This one behaves the same way, except for manifest data.
        """


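# Illustrative sketch of identifier resolution against ``imanifeststorage``
# using the documented ``error.LookupError`` behavior (``error`` is already
# imported at the top of this module). ``store`` is assumed to conform to
# the interface above.
def _example_resolve(store, value):
    try:
        node = store.lookup(value)  # binary/hex node, revnum, or int-like bytes
    except error.LookupError:
        return None
    rev = store.rev(node)  # node <-> rev are inverse mappings
    p1, p2 = store.parents(node)  # nullid stands in for absent parents
    return rev, node, p1, p2

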
class imanifestlog(interfaceutil.Interface):
    """Interface representing a collection of manifest snapshots.

    Represents the root manifest in a repository.

    Also serves as a means to access nested tree manifests and to cache
    tree manifests.
    """

    def __getitem__(node):
        """Obtain a manifest instance for a given binary node.

        Equivalent to calling ``self.get('', node)``.

        The returned object conforms to the ``imanifestrevisionstored``
        interface.
        """

    def get(tree, node, verify=True):
        """Retrieve the manifest instance for a given directory and binary node.

        ``node`` always refers to the node of the root manifest (which will be
        the only manifest if flat manifests are being used).

        If ``tree`` is the empty string, the root manifest is returned.
        Otherwise the manifest for the specified directory will be returned
        (requires tree manifests).

        If ``verify`` is True, ``LookupError`` is raised if the node is not
        known.

        The returned object conforms to the ``imanifestrevisionstored``
        interface.
        """

    def getstorage(tree):
        """Retrieve an interface to storage for a particular tree.

        If ``tree`` is the empty bytestring, storage for the root manifest will
        be returned. Otherwise storage for a tree manifest is returned.

        TODO formalize interface for returned object.
        """

    def clearcaches():
        """Clear caches associated with this collection."""

    def rev(node):
        """Obtain the revision number for a binary node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def update_caches(transaction):
        """Update whatever caches are relevant for the used storage."""


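# Sketch of the lookup pattern documented on ``imanifestlog.get()``: ``node``
# always names the *root* manifest, while ``tree`` selects a subdirectory
# manifest when tree manifests are in use. ``mlog`` is assumed to conform to
# ``imanifestlog``.
def _example_loaddirectory(mlog, rootnode, tree=b''):
    # With tree == b'' this is equivalent to mlog[rootnode].
    mctx = mlog.get(tree, rootnode)  # imanifestrevisionstored
    return mctx.read()  # imanifestdict for that directory

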
class ilocalrepositoryfilestorage(interfaceutil.Interface):
    """Local repository sub-interface providing access to tracked file storage.

    This interface defines how a repository accesses storage for a single
    tracked file path.
    """

    def file(f):
        """Obtain a filelog for a tracked path.

        The returned type conforms to the ``ifilestorage`` interface.
        """


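# Sketch tying ``ilocalrepositoryfilestorage.file()`` back to the file
# storage interfaces earlier in this module. ``repo`` is assumed to conform
# to this sub-interface; the path is a placeholder.
def _example_filebackingpaths(repo):
    fl = repo.file(b'README')  # conforms to ifilestorage
    return list(fl.files())  # backing-store paths, as used by verify

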
class ilocalrepositorymain(interfaceutil.Interface):
    """Main interface for local repositories.

    This currently captures the reality of things - not how things should be.
    """

    supportedformats = interfaceutil.Attribute(
        """Set of requirements that apply to stream clone.

        This is actually a class attribute and is shared among all instances.
        """
    )

    supported = interfaceutil.Attribute(
        """Set of requirements that this repo is capable of opening."""
    )

    requirements = interfaceutil.Attribute(
        """Set of requirements this repo uses."""
    )

    features = interfaceutil.Attribute(
        """Set of "features" this repository supports.

        A "feature" is a loosely-defined term. It can refer to a feature
        in the classical sense or can describe an implementation detail
        of the repository. For example, a ``readonly`` feature may denote
        the repository as read-only. Or a ``revlogfilestore`` feature may
        denote that the repository is using revlogs for file storage.

        The intent of features is to provide a machine-queryable mechanism
        for repo consumers to test for various repository characteristics.

        Features are similar to ``requirements``. The main difference is that
        requirements are stored on-disk and represent requirements to open the
        repository. Features are more run-time capabilities of the repository
        and more granular capabilities (which may be derived from requirements).
        """
    )

    filtername = interfaceutil.Attribute(
        """Name of the repoview that is active on this repo."""
    )

    wvfs = interfaceutil.Attribute(
        """VFS used to access the working directory."""
    )

    vfs = interfaceutil.Attribute(
        """VFS rooted at the .hg directory.

        Used to access repository data not in the store.
        """
    )

    svfs = interfaceutil.Attribute(
        """VFS rooted at the store.

        Used to access repository data in the store. Typically .hg/store.
        But can point elsewhere if the store is shared.
        """
    )

    root = interfaceutil.Attribute(
        """Path to the root of the working directory."""
    )

    path = interfaceutil.Attribute("""Path to the .hg directory.""")

    origroot = interfaceutil.Attribute(
        """The filesystem path that was used to construct the repo."""
    )

    auditor = interfaceutil.Attribute(
        """A pathauditor for the working directory.

        This checks if a path refers to a nested repository.

        Operates on the filesystem.
        """
    )

    nofsauditor = interfaceutil.Attribute(
        """A pathauditor for the working directory.

        This is like ``auditor`` except it doesn't do filesystem checks.
        """
    )

    baseui = interfaceutil.Attribute(
        """Original ui instance passed into constructor."""
    )

    ui = interfaceutil.Attribute("""Main ui instance for this repository.""")

    sharedpath = interfaceutil.Attribute(
        """Path to the .hg directory of the repo this repo was shared from."""
    )

    store = interfaceutil.Attribute("""A store instance.""")

    spath = interfaceutil.Attribute("""Path to the store.""")

    sjoin = interfaceutil.Attribute("""Alias to self.store.join.""")

    cachevfs = interfaceutil.Attribute(
        """A VFS used to access the cache directory.

        Typically .hg/cache.
        """
    )

    wcachevfs = interfaceutil.Attribute(
        """A VFS used to access the cache directory dedicated to the working
        copy.

        Typically .hg/wcache.
        """
    )

    filteredrevcache = interfaceutil.Attribute(
        """Holds sets of revisions to be filtered."""
    )

    names = interfaceutil.Attribute("""A ``namespaces`` instance.""")

    filecopiesmode = interfaceutil.Attribute(
        """The way file copies should be dealt with in this repo."""
    )

    def close():
        """Close the handle on this repository."""

    def peer():
        """Obtain an object conforming to the ``peer`` interface."""

    def unfiltered():
        """Obtain an unfiltered/raw view of this repo."""

    def filtered(name, visibilityexceptions=None):
        """Obtain a named view of this repository."""

    obsstore = interfaceutil.Attribute("""A store of obsolescence data.""")

    changelog = interfaceutil.Attribute("""A handle on the changelog revlog.""")

    manifestlog = interfaceutil.Attribute(
        """An instance conforming to the ``imanifestlog`` interface.

        Provides access to manifests for the repository.
        """
    )

    dirstate = interfaceutil.Attribute("""Working directory state.""")

    narrowpats = interfaceutil.Attribute(
        """Matcher patterns for this repository's narrowspec."""
    )

    def narrowmatch(match=None, includeexact=False):
        """Obtain a matcher for the narrowspec."""

    def setnarrowpats(newincludes, newexcludes):
        """Define the narrowspec for this repository."""

    def __getitem__(changeid):
        """Try to resolve a changectx."""

    def __contains__(changeid):
        """Whether a changeset exists."""

    def __nonzero__():
        """Always returns True."""
        return True

    __bool__ = __nonzero__

    def __len__():
        """Returns the number of changesets in the repo."""

    def __iter__():
        """Iterate over revisions in the changelog."""

    def revs(expr, *args):
        """Evaluate a revset.

        Emits revisions.
        """

    def set(expr, *args):
        """Evaluate a revset.

        Emits changectx instances.
        """

    def anyrevs(specs, user=False, localalias=None):
        """Find revisions matching one of the given revsets."""

    def url():
        """Returns a string representing the location of this repo."""

    def hook(name, throw=False, **args):
        """Call a hook."""

    def tags():
        """Return a mapping of tag to node."""

    def tagtype(tagname):
        """Return the type of a given tag."""

    def tagslist():
        """Return a list of tags ordered by revision."""

    def nodetags(node):
        """Return the tags associated with a node."""

    def nodebookmarks(node):
        """Return the list of bookmarks pointing to the specified node."""

    def branchmap():
        """Return a mapping of branch to heads in that branch."""

    def revbranchcache():
        pass

    def branchtip(branchtip, ignoremissing=False):
        """Return the tip node for a given branch."""

    def lookup(key):
        """Resolve the node for a revision."""

    def lookupbranch(key):
        """Look up the branch name of the given revision or branch name."""

    def known(nodes):
        """Determine whether a series of nodes is known.

        Returns a list of bools.
        """

    def local():
        """Whether the repository is local."""
        return True

    def publishing():
        """Whether the repository is a publishing repository."""

    def cancopy():
        pass

    def shared():
        """The type of shared repository or None."""

    def wjoin(f, *insidef):
        """Calls self.vfs.reljoin(self.root, f, *insidef)"""

    def setparents(p1, p2):
        """Set the parent nodes of the working directory."""

    def filectx(path, changeid=None, fileid=None):
        """Obtain a filectx for the given file revision."""

    def getcwd():
        """Obtain the current working directory from the dirstate."""

    def pathto(f, cwd=None):
        """Obtain the relative path to a file."""

    def adddatafilter(name, fltr):
        pass

    def wread(filename):
        """Read a file from wvfs, using data filters."""

    def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
        """Write data to a file in the wvfs, using data filters."""

    def wwritedata(filename, data):
        """Resolve data for writing to the wvfs, using data filters."""

    def currenttransaction():
        """Obtain the current transaction instance or None."""

    def transaction(desc, report=None):
        """Open a new transaction to write to the repository."""

    def undofiles():
        """Returns a list of (vfs, path) for files to undo transactions."""

    def recover():
        """Roll back an interrupted transaction."""

    def rollback(dryrun=False, force=False):
        """Undo the last transaction.

        DANGEROUS.
        """

    def updatecaches(tr=None, full=False):
        """Warm repo caches."""

    def invalidatecaches():
        """Invalidate cached data due to the repository mutating."""

    def invalidatevolatilesets():
        pass

    def invalidatedirstate():
        """Invalidate the dirstate."""

    def invalidate(clearfilecache=False):
        pass

    def invalidateall():
        pass

    def lock(wait=True):
        """Lock the repository store and return a lock instance."""

    def wlock(wait=True):
        """Lock the non-store parts of the repository."""

    def currentwlock():
        """Return the wlock if it's held or None."""

    def checkcommitpatterns(wctx, match, status, fail):
        pass

    def commit(
        text=b'',
        user=None,
        date=None,
        match=None,
        force=False,
        editor=False,
        extra=None,
    ):
        """Add a new revision to the repository."""

    def commitctx(ctx, error=False, origctx=None):
        """Commit a commitctx instance to the repository."""

    def destroying():
        """Inform the repository that nodes are about to be destroyed."""

    def destroyed():
        """Inform the repository that nodes have been destroyed."""

    def status(
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        """Convenience method to call repo[x].status()."""

    def addpostdsstatus(ps):
        pass

    def postdsstatus():
        pass

    def clearpostdsstatus():
        pass

    def heads(start=None):
        """Obtain list of nodes that are DAG heads."""

    def branchheads(branch=None, start=None, closed=False):
        pass

    def branches(nodes):
        pass

    def between(pairs):
        pass

    def checkpush(pushop):
        pass

    prepushoutgoinghooks = interfaceutil.Attribute("""util.hooks instance.""")

    def pushkey(namespace, key, old, new):
        pass

    def listkeys(namespace):
        pass

    def debugwireargs(one, two, three=None, four=None, five=None):
        pass

    def savecommitmessage(text):
        pass


class completelocalrepository(
    ilocalrepositorymain, ilocalrepositoryfilestorage
):
    """Complete interface for a local repository."""


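# Sketch of the requirements/features distinction documented on
# ``ilocalrepositorymain``: requirements are on-disk and gate opening the
# repository, while features describe run-time capabilities. The constants
# are defined at the top of this module; ``repo`` is assumed to conform to
# ``completelocalrepository``.
def _example_capabilities(repo):
    is_narrow = NARROW_REQUIREMENT in repo.requirements  # on-disk format
    has_lfs = REPO_FEATURE_LFS in repo.features  # run-time capability
    return is_narrow, has_lfs

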
class iwireprotocolcommandcacher(interfaceutil.Interface):
    """Represents a caching backend for wire protocol commands.

    Wire protocol version 2 supports transparent caching of many commands.
    To leverage this caching, servers can activate objects that cache
    command responses. Objects handle both cache writing and reading.
    This interface defines how that response caching mechanism works.

    Wire protocol version 2 commands emit a series of objects that are
    serialized and sent to the client. The caching layer exists between
    the invocation of the command function and the sending of its output
    objects to an output layer.

    Instances of this interface represent a binding to a cache that
    can serve a response (in place of calling a command function) and/or
    write responses to a cache for subsequent use.

    When a command request arrives, the following happens with regard
    to this interface:

    1. The server determines whether the command request is cacheable.
    2. If it is, an instance of this interface is spawned.
    3. The cacher is activated in a context manager (``__enter__`` is called).
    4. A cache *key* for that request is derived. This will call the
       instance's ``adjustcachekeystate()`` method so the derivation
       can be influenced.
    5. The cacher is informed of the derived cache key via a call to
       ``setcachekey()``.
    6. The cacher's ``lookup()`` method is called to test for presence of
       the derived key in the cache.
    7. If ``lookup()`` returns a hit, that cached result is used in place
       of invoking the command function. ``__exit__`` is called and the
       instance is discarded.
    8. The command function is invoked.
    9. ``onobject()`` is called for each object emitted by the command
       function.
    10. After the final object is seen, ``onfinished()`` is called.
    11. ``__exit__`` is called to signal the end of use of the instance.

1861 Cache *key* derivation can be influenced by the instance.
1865 Cache *key* derivation can be influenced by the instance.
1862
1866
1863 Cache keys are initially derived from a deterministic representation of
1867 Cache keys are initially derived from a deterministic representation of
1864 the command request. This includes the command name, arguments, protocol
1868 the command request. This includes the command name, arguments, protocol
1865 version, etc. This initial key derivation is performed by CBOR-encoding a
1869 version, etc. This initial key derivation is performed by CBOR-encoding a
1866 data structure and feeding that output into a hasher.
1870 data structure and feeding that output into a hasher.
1867
1871
1868 Instances of this interface can influence this initial key derivation
1872 Instances of this interface can influence this initial key derivation
1869 via ``adjustcachekeystate()``.
1873 via ``adjustcachekeystate()``.
1870
1874
1871 The instance is informed of the derived cache key via a call to
1875 The instance is informed of the derived cache key via a call to
1872 ``setcachekey()``. The instance must store the key locally so it can
1876 ``setcachekey()``. The instance must store the key locally so it can
1873 be consulted on subsequent operations that may require it.
1877 be consulted on subsequent operations that may require it.
1874
1878
1875 When constructed, the instance has access to a callable that can be used
1879 When constructed, the instance has access to a callable that can be used
1876 for encoding response objects. This callable receives as its single
1880 for encoding response objects. This callable receives as its single
1877 argument an object emitted by a command function. It returns an iterable
1881 argument an object emitted by a command function. It returns an iterable
1878 of bytes chunks representing the encoded object. Unless the cacher is
1882 of bytes chunks representing the encoded object. Unless the cacher is
1879 caching native Python objects in memory or has a way of reconstructing
1883 caching native Python objects in memory or has a way of reconstructing
1880 the original Python objects, implementations typically call this function
1884 the original Python objects, implementations typically call this function
1881 to produce bytes from the output objects and then store those bytes in
1885 to produce bytes from the output objects and then store those bytes in
1882 the cache. When it comes time to re-emit those bytes, they are wrapped
1886 the cache. When it comes time to re-emit those bytes, they are wrapped
1883 in a ``wireprototypes.encodedresponse`` instance to tell the output
1887 in a ``wireprototypes.encodedresponse`` instance to tell the output
1884 layer that they are pre-encoded.
1888 layer that they are pre-encoded.
1885
1889
1886 When receiving the objects emitted by the command function, instances
1890 When receiving the objects emitted by the command function, instances
1887 can choose what to do with those objects. The simplest thing to do is
1891 can choose what to do with those objects. The simplest thing to do is
1888 re-emit the original objects. They will be forwarded to the output
1892 re-emit the original objects. They will be forwarded to the output
1889 layer and will be processed as if the cacher did not exist.
1893 layer and will be processed as if the cacher did not exist.
1890
1894
1891 Implementations could also choose to not emit objects - instead locally
1895 Implementations could also choose to not emit objects - instead locally
1892 buffering objects or their encoded representation. They could then emit
1896 buffering objects or their encoded representation. They could then emit
1893 a single "coalesced" object when ``onfinished()`` is called. In
1897 a single "coalesced" object when ``onfinished()`` is called. In
1894 this way, the implementation would function as a filtering layer of
1898 this way, the implementation would function as a filtering layer of
1895 sorts.
1899 sorts.
1896
1900
1897 When caching objects, typically the encoded form of the object will
1901 When caching objects, typically the encoded form of the object will
1898 be stored. Keep in mind that if the original object is forwarded to
1902 be stored. Keep in mind that if the original object is forwarded to
1899 the output layer, it will need to be encoded there as well. For large
1903 the output layer, it will need to be encoded there as well. For large
1900 output, this redundant encoding could add overhead. Implementations
1904 output, this redundant encoding could add overhead. Implementations
1901 could wrap the encoded object data in ``wireprototypes.encodedresponse``
1905 could wrap the encoded object data in ``wireprototypes.encodedresponse``
1902 instances to avoid this overhead.
1906 instances to avoid this overhead.
1903 """
1907 """
1904
1908
1905 def __enter__():
1909 def __enter__():
1906 """Marks the instance as active.
1910 """Marks the instance as active.
1907
1911
1908 Should return self.
1912 Should return self.
1909 """
1913 """
1910
1914
1911 def __exit__(exctype, excvalue, exctb):
1915 def __exit__(exctype, excvalue, exctb):
1912 """Called when cacher is no longer used.
1916 """Called when cacher is no longer used.
1913
1917
1914 This can be used by implementations to perform cleanup actions (e.g.
1918 This can be used by implementations to perform cleanup actions (e.g.
1915 disconnecting network sockets, aborting a partially cached response).
1919 disconnecting network sockets, aborting a partially cached response).
1916 """
1920 """
1917
1921
1918 def adjustcachekeystate(state):
1922 def adjustcachekeystate(state):
1919 """Influences cache key derivation by adjusting state to derive key.
1923 """Influences cache key derivation by adjusting state to derive key.
1920
1924
1921 A dict defining the state used to derive the cache key is passed.
1925 A dict defining the state used to derive the cache key is passed.
1922
1926
1923 Implementations can modify this dict to record additional state that
1927 Implementations can modify this dict to record additional state that
1924 should influence key derivation.
1928 should influence key derivation.
1925
1929
1926 Implementations are *highly* encouraged to not modify or delete
1930 Implementations are *highly* encouraged to not modify or delete
1927 existing keys.
1931 existing keys.
1928 """
1932 """
1929
1933
1930 def setcachekey(key):
1934 def setcachekey(key):
1931 """Record the derived cache key for this request.
1935 """Record the derived cache key for this request.
1932
1936
1933 Instances may mutate the key for internal usage, as desired. e.g.
1937 Instances may mutate the key for internal usage, as desired. e.g.
1934 instances may wish to prepend the repo name, introduce path
1938 instances may wish to prepend the repo name, introduce path
1935 components for filesystem or URL addressing, etc. Behavior is up to
1939 components for filesystem or URL addressing, etc. Behavior is up to
1936 the cache.
1940 the cache.
1937
1941
1938 Returns a bool indicating if the request is cacheable by this
1942 Returns a bool indicating if the request is cacheable by this
1939 instance.
1943 instance.
1940 """
1944 """
1941
1945
1942 def lookup():
1946 def lookup():
1943 """Attempt to resolve an entry in the cache.
1947 """Attempt to resolve an entry in the cache.
1944
1948
1945 The instance is instructed to look for the cache key that it was
1949 The instance is instructed to look for the cache key that it was
1946 informed about via the call to ``setcachekey()``.
1950 informed about via the call to ``setcachekey()``.
1947
1951
1948 If there's no cache hit or the cacher doesn't wish to use the cached
1952 If there's no cache hit or the cacher doesn't wish to use the cached
1949 entry, ``None`` should be returned.
1953 entry, ``None`` should be returned.
1950
1954
1951 Else, a dict defining the cached result should be returned. The
1955 Else, a dict defining the cached result should be returned. The
1952 dict may have the following keys:
1956 dict may have the following keys:
1953
1957
1954 objs
1958 objs
1955 An iterable of objects that should be sent to the client. That
1959 An iterable of objects that should be sent to the client. That
1956 iterable of objects is expected to be what the command function
1960 iterable of objects is expected to be what the command function
1957 would return if invoked or an equivalent representation thereof.
1961 would return if invoked or an equivalent representation thereof.
1958 """
1962 """
1959
1963
1960 def onobject(obj):
1964 def onobject(obj):
1961 """Called when a new object is emitted from the command function.
1965 """Called when a new object is emitted from the command function.
1962
1966
1963 Receives as its argument the object that was emitted from the
1967 Receives as its argument the object that was emitted from the
1964 command function.
1968 command function.
1965
1969
1966 This method returns an iterator of objects to forward to the output
1970 This method returns an iterator of objects to forward to the output
1967 layer. The easiest implementation is a generator that just
1971 layer. The easiest implementation is a generator that just
1968 ``yield obj``.
1972 ``yield obj``.
1969 """
1973 """
1970
1974
1971 def onfinished():
1975 def onfinished():
1972 """Called after all objects have been emitted from the command function.
1976 """Called after all objects have been emitted from the command function.
1973
1977
1974 Implementations should return an iterator of objects to forward to
1978 Implementations should return an iterator of objects to forward to
1975 the output layer.
1979 the output layer.
1976
1980
1977 This method can be a generator.
1981 This method can be a generator.
1978 """
1982 """
@@ -1,3530 +1,3530 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import os
11 import os
12 import random
12 import random
13 import sys
13 import sys
14 import time
14 import time
15 import weakref
15 import weakref
16
16
17 from .i18n import _
17 from .i18n import _
18 from .node import (
18 from .node import (
19 bin,
19 bin,
20 hex,
20 hex,
21 nullid,
21 nullid,
22 nullrev,
22 nullrev,
23 short,
23 short,
24 )
24 )
25 from .pycompat import (
25 from .pycompat import (
26 delattr,
26 delattr,
27 getattr,
27 getattr,
28 )
28 )
29 from . import (
29 from . import (
30 bookmarks,
30 bookmarks,
31 branchmap,
31 branchmap,
32 bundle2,
32 bundle2,
33 changegroup,
33 changegroup,
34 color,
34 color,
35 commit,
35 commit,
36 context,
36 context,
37 dirstate,
37 dirstate,
38 dirstateguard,
38 dirstateguard,
39 discovery,
39 discovery,
40 encoding,
40 encoding,
41 error,
41 error,
42 exchange,
42 exchange,
43 extensions,
43 extensions,
44 filelog,
44 filelog,
45 hook,
45 hook,
46 lock as lockmod,
46 lock as lockmod,
47 match as matchmod,
47 match as matchmod,
48 mergestate as mergestatemod,
48 mergestate as mergestatemod,
49 mergeutil,
49 mergeutil,
50 namespaces,
50 namespaces,
51 narrowspec,
51 narrowspec,
52 obsolete,
52 obsolete,
53 pathutil,
53 pathutil,
54 phases,
54 phases,
55 pushkey,
55 pushkey,
56 pycompat,
56 pycompat,
57 rcutil,
57 rcutil,
58 repoview,
58 repoview,
59 revset,
59 revset,
60 revsetlang,
60 revsetlang,
61 scmutil,
61 scmutil,
62 sparse,
62 sparse,
63 store as storemod,
63 store as storemod,
64 subrepoutil,
64 subrepoutil,
65 tags as tagsmod,
65 tags as tagsmod,
66 transaction,
66 transaction,
67 txnutil,
67 txnutil,
68 util,
68 util,
69 vfs as vfsmod,
69 vfs as vfsmod,
70 )
70 )
71
71
72 from .interfaces import (
72 from .interfaces import (
73 repository,
73 repository,
74 util as interfaceutil,
74 util as interfaceutil,
75 )
75 )
76
76
77 from .utils import (
77 from .utils import (
78 hashutil,
78 hashutil,
79 procutil,
79 procutil,
80 stringutil,
80 stringutil,
81 )
81 )
82
82
83 from .revlogutils import constants as revlogconst
83 from .revlogutils import constants as revlogconst
84
84
85 release = lockmod.release
85 release = lockmod.release
86 urlerr = util.urlerr
86 urlerr = util.urlerr
87 urlreq = util.urlreq
87 urlreq = util.urlreq
88
88
89 # set of (path, vfs-location) tuples. vfs-location is:
89 # set of (path, vfs-location) tuples. vfs-location is:
90 # - 'plain' for vfs relative paths
90 # - 'plain' for vfs relative paths
91 # - '' for svfs relative paths
91 # - '' for svfs relative paths
92 _cachedfiles = set()
92 _cachedfiles = set()
93
93
94
94
95 class _basefilecache(scmutil.filecache):
95 class _basefilecache(scmutil.filecache):
96 """All filecache usage on repo are done for logic that should be unfiltered
96 """All filecache usage on repo are done for logic that should be unfiltered
97 """
97 """
98
98
99 def __get__(self, repo, type=None):
99 def __get__(self, repo, type=None):
100 if repo is None:
100 if repo is None:
101 return self
101 return self
102 # proxy to unfiltered __dict__ since filtered repo has no entry
102 # proxy to unfiltered __dict__ since filtered repo has no entry
103 unfi = repo.unfiltered()
103 unfi = repo.unfiltered()
104 try:
104 try:
105 return unfi.__dict__[self.sname]
105 return unfi.__dict__[self.sname]
106 except KeyError:
106 except KeyError:
107 pass
107 pass
108 return super(_basefilecache, self).__get__(unfi, type)
108 return super(_basefilecache, self).__get__(unfi, type)
109
109
110 def set(self, repo, value):
110 def set(self, repo, value):
111 return super(_basefilecache, self).set(repo.unfiltered(), value)
111 return super(_basefilecache, self).set(repo.unfiltered(), value)
112
112
113
113
114 class repofilecache(_basefilecache):
114 class repofilecache(_basefilecache):
115 """filecache for files in .hg but outside of .hg/store"""
115 """filecache for files in .hg but outside of .hg/store"""
116
116
117 def __init__(self, *paths):
117 def __init__(self, *paths):
118 super(repofilecache, self).__init__(*paths)
118 super(repofilecache, self).__init__(*paths)
119 for path in paths:
119 for path in paths:
120 _cachedfiles.add((path, b'plain'))
120 _cachedfiles.add((path, b'plain'))
121
121
122 def join(self, obj, fname):
122 def join(self, obj, fname):
123 return obj.vfs.join(fname)
123 return obj.vfs.join(fname)
124
124
125
125
126 class storecache(_basefilecache):
126 class storecache(_basefilecache):
127 """filecache for files in the store"""
127 """filecache for files in the store"""
128
128
129 def __init__(self, *paths):
129 def __init__(self, *paths):
130 super(storecache, self).__init__(*paths)
130 super(storecache, self).__init__(*paths)
131 for path in paths:
131 for path in paths:
132 _cachedfiles.add((path, b''))
132 _cachedfiles.add((path, b''))
133
133
134 def join(self, obj, fname):
134 def join(self, obj, fname):
135 return obj.sjoin(fname)
135 return obj.sjoin(fname)
136
136
137
137
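# Illustrative usage sketch (not part of the original file): these filecache
# decorators are applied to properties of the repository class so that the
# cached value is invalidated when the backing file changes. For instance,
# a property backed by ``.hg/bookmarks`` (outside the store) would use:
#
#     @repofilecache(b'bookmarks')
#     def _bookmarks(self):
#         return bookmarks.bmstore(self)
#
# whereas a property backed by a store file would use, e.g.,
# ``@storecache(b'00changelog.i')``.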
138 class mixedrepostorecache(_basefilecache):
138 class mixedrepostorecache(_basefilecache):
139 """filecache for a mix files in .hg/store and outside"""
139 """filecache for a mix files in .hg/store and outside"""
140
140
141 def __init__(self, *pathsandlocations):
141 def __init__(self, *pathsandlocations):
142 # scmutil.filecache only uses the path for passing back into our
142 # scmutil.filecache only uses the path for passing back into our
143 # join(), so we can safely pass a list of paths and locations
143 # join(), so we can safely pass a list of paths and locations
144 super(mixedrepostorecache, self).__init__(*pathsandlocations)
144 super(mixedrepostorecache, self).__init__(*pathsandlocations)
145 _cachedfiles.update(pathsandlocations)
145 _cachedfiles.update(pathsandlocations)
146
146
147 def join(self, obj, fnameandlocation):
147 def join(self, obj, fnameandlocation):
148 fname, location = fnameandlocation
148 fname, location = fnameandlocation
149 if location == b'plain':
149 if location == b'plain':
150 return obj.vfs.join(fname)
150 return obj.vfs.join(fname)
151 else:
151 else:
152 if location != b'':
152 if location != b'':
153 raise error.ProgrammingError(
153 raise error.ProgrammingError(
154 b'unexpected location: %s' % location
154 b'unexpected location: %s' % location
155 )
155 )
156 return obj.sjoin(fname)
156 return obj.sjoin(fname)
157
157
158
158
159 def isfilecached(repo, name):
159 def isfilecached(repo, name):
160 """check if a repo has already cached "name" filecache-ed property
160 """check if a repo has already cached "name" filecache-ed property
161
161
162 This returns (cachedobj-or-None, iscached) tuple.
162 This returns (cachedobj-or-None, iscached) tuple.
163 """
163 """
164 cacheentry = repo.unfiltered()._filecache.get(name, None)
164 cacheentry = repo.unfiltered()._filecache.get(name, None)
165 if not cacheentry:
165 if not cacheentry:
166 return None, False
166 return None, False
167 return cacheentry.obj, True
167 return cacheentry.obj, True
168
168
169
169
170 class unfilteredpropertycache(util.propertycache):
170 class unfilteredpropertycache(util.propertycache):
171 """propertycache that apply to unfiltered repo only"""
171 """propertycache that apply to unfiltered repo only"""
172
172
173 def __get__(self, repo, type=None):
173 def __get__(self, repo, type=None):
174 unfi = repo.unfiltered()
174 unfi = repo.unfiltered()
175 if unfi is repo:
175 if unfi is repo:
176 return super(unfilteredpropertycache, self).__get__(unfi)
176 return super(unfilteredpropertycache, self).__get__(unfi)
177 return getattr(unfi, self.name)
177 return getattr(unfi, self.name)
178
178
179
179
180 class filteredpropertycache(util.propertycache):
180 class filteredpropertycache(util.propertycache):
181 """propertycache that must take filtering in account"""
181 """propertycache that must take filtering in account"""
182
182
183 def cachevalue(self, obj, value):
183 def cachevalue(self, obj, value):
184 object.__setattr__(obj, self.name, value)
184 object.__setattr__(obj, self.name, value)
185
185
186
186
187 def hasunfilteredcache(repo, name):
187 def hasunfilteredcache(repo, name):
188 """check if a repo has an unfilteredpropertycache value for <name>"""
188 """check if a repo has an unfilteredpropertycache value for <name>"""
189 return name in vars(repo.unfiltered())
189 return name in vars(repo.unfiltered())
190
190
191
191
192 def unfilteredmethod(orig):
192 def unfilteredmethod(orig):
193 """decorate method that always need to be run on unfiltered version"""
193 """decorate method that always need to be run on unfiltered version"""
194
194
195 def wrapper(repo, *args, **kwargs):
195 def wrapper(repo, *args, **kwargs):
196 return orig(repo.unfiltered(), *args, **kwargs)
196 return orig(repo.unfiltered(), *args, **kwargs)
197
197
198 return wrapper
198 return wrapper
199
199
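# Illustrative usage sketch (not part of the original file): methods on the
# repository class that must always see the unfiltered repository are
# wrapped with this decorator, e.g.:
#
#     @unfilteredmethod
#     def destroyed(self):
#         '''Inform the repository that nodes have been destroyed.'''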
200
200
201 moderncaps = {
201 moderncaps = {
202 b'lookup',
202 b'lookup',
203 b'branchmap',
203 b'branchmap',
204 b'pushkey',
204 b'pushkey',
205 b'known',
205 b'known',
206 b'getbundle',
206 b'getbundle',
207 b'unbundle',
207 b'unbundle',
208 }
208 }
209 legacycaps = moderncaps.union({b'changegroupsubset'})
209 legacycaps = moderncaps.union({b'changegroupsubset'})
210
210
211
211
212 @interfaceutil.implementer(repository.ipeercommandexecutor)
212 @interfaceutil.implementer(repository.ipeercommandexecutor)
213 class localcommandexecutor(object):
213 class localcommandexecutor(object):
214 def __init__(self, peer):
214 def __init__(self, peer):
215 self._peer = peer
215 self._peer = peer
216 self._sent = False
216 self._sent = False
217 self._closed = False
217 self._closed = False
218
218
219 def __enter__(self):
219 def __enter__(self):
220 return self
220 return self
221
221
222 def __exit__(self, exctype, excvalue, exctb):
222 def __exit__(self, exctype, excvalue, exctb):
223 self.close()
223 self.close()
224
224
225 def callcommand(self, command, args):
225 def callcommand(self, command, args):
226 if self._sent:
226 if self._sent:
227 raise error.ProgrammingError(
227 raise error.ProgrammingError(
228 b'callcommand() cannot be used after sendcommands()'
228 b'callcommand() cannot be used after sendcommands()'
229 )
229 )
230
230
231 if self._closed:
231 if self._closed:
232 raise error.ProgrammingError(
232 raise error.ProgrammingError(
233 b'callcommand() cannot be used after close()'
233 b'callcommand() cannot be used after close()'
234 )
234 )
235
235
236 # We don't need to support anything fancy. Just call the named
236 # We don't need to support anything fancy. Just call the named
237 # method on the peer and return a resolved future.
237 # method on the peer and return a resolved future.
238 fn = getattr(self._peer, pycompat.sysstr(command))
238 fn = getattr(self._peer, pycompat.sysstr(command))
239
239
240 f = pycompat.futures.Future()
240 f = pycompat.futures.Future()
241
241
242 try:
242 try:
243 result = fn(**pycompat.strkwargs(args))
243 result = fn(**pycompat.strkwargs(args))
244 except Exception:
244 except Exception:
245 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
245 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
246 else:
246 else:
247 f.set_result(result)
247 f.set_result(result)
248
248
249 return f
249 return f
250
250
251 def sendcommands(self):
251 def sendcommands(self):
252 self._sent = True
252 self._sent = True
253
253
254 def close(self):
254 def close(self):
255 self._closed = True
255 self._closed = True
256
256
257
257
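# Illustrative usage sketch (not part of the original file): callers drive
# any command executor, including this local one, through the same pattern.
# ``peer`` stands for any object implementing the peer interface; the
# command name and argument below are examples:
#
#     with peer.commandexecutor() as executor:
#         f = executor.callcommand(b'lookup', {b'key': b'tip'})
#         executor.sendcommands()
#         node = f.result()  # raises here if the command raised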
258 @interfaceutil.implementer(repository.ipeercommands)
258 @interfaceutil.implementer(repository.ipeercommands)
259 class localpeer(repository.peer):
259 class localpeer(repository.peer):
260 '''peer for a local repo; reflects only the most recent API'''
260 '''peer for a local repo; reflects only the most recent API'''
261
261
262 def __init__(self, repo, caps=None):
262 def __init__(self, repo, caps=None):
263 super(localpeer, self).__init__()
263 super(localpeer, self).__init__()
264
264
265 if caps is None:
265 if caps is None:
266 caps = moderncaps.copy()
266 caps = moderncaps.copy()
267 self._repo = repo.filtered(b'served')
267 self._repo = repo.filtered(b'served')
268 self.ui = repo.ui
268 self.ui = repo.ui
269 self._caps = repo._restrictcapabilities(caps)
269 self._caps = repo._restrictcapabilities(caps)
270
270
271 # Begin of _basepeer interface.
271 # Begin of _basepeer interface.
272
272
273 def url(self):
273 def url(self):
274 return self._repo.url()
274 return self._repo.url()
275
275
276 def local(self):
276 def local(self):
277 return self._repo
277 return self._repo
278
278
279 def peer(self):
279 def peer(self):
280 return self
280 return self
281
281
282 def canpush(self):
282 def canpush(self):
283 return True
283 return True
284
284
285 def close(self):
285 def close(self):
286 self._repo.close()
286 self._repo.close()
287
287
288 # End of _basepeer interface.
288 # End of _basepeer interface.
289
289
290 # Begin of _basewirecommands interface.
290 # Begin of _basewirecommands interface.
291
291
292 def branchmap(self):
292 def branchmap(self):
293 return self._repo.branchmap()
293 return self._repo.branchmap()
294
294
295 def capabilities(self):
295 def capabilities(self):
296 return self._caps
296 return self._caps
297
297
298 def clonebundles(self):
298 def clonebundles(self):
299 return self._repo.tryread(b'clonebundles.manifest')
299 return self._repo.tryread(b'clonebundles.manifest')
300
300
301 def debugwireargs(self, one, two, three=None, four=None, five=None):
301 def debugwireargs(self, one, two, three=None, four=None, five=None):
302 """Used to test argument passing over the wire"""
302 """Used to test argument passing over the wire"""
303 return b"%s %s %s %s %s" % (
303 return b"%s %s %s %s %s" % (
304 one,
304 one,
305 two,
305 two,
306 pycompat.bytestr(three),
306 pycompat.bytestr(three),
307 pycompat.bytestr(four),
307 pycompat.bytestr(four),
308 pycompat.bytestr(five),
308 pycompat.bytestr(five),
309 )
309 )
310
310
311 def getbundle(
311 def getbundle(
312 self, source, heads=None, common=None, bundlecaps=None, **kwargs
312 self, source, heads=None, common=None, bundlecaps=None, **kwargs
313 ):
313 ):
314 chunks = exchange.getbundlechunks(
314 chunks = exchange.getbundlechunks(
315 self._repo,
315 self._repo,
316 source,
316 source,
317 heads=heads,
317 heads=heads,
318 common=common,
318 common=common,
319 bundlecaps=bundlecaps,
319 bundlecaps=bundlecaps,
320 **kwargs
320 **kwargs
321 )[1]
321 )[1]
322 cb = util.chunkbuffer(chunks)
322 cb = util.chunkbuffer(chunks)
323
323
324 if exchange.bundle2requested(bundlecaps):
324 if exchange.bundle2requested(bundlecaps):
325 # When requesting a bundle2, getbundle returns a stream to make the
325 # When requesting a bundle2, getbundle returns a stream to make the
326 # wire level function happier. We need to build a proper object
326 # wire level function happier. We need to build a proper object
327 # from it in local peer.
327 # from it in local peer.
328 return bundle2.getunbundler(self.ui, cb)
328 return bundle2.getunbundler(self.ui, cb)
329 else:
329 else:
330 return changegroup.getunbundler(b'01', cb, None)
330 return changegroup.getunbundler(b'01', cb, None)
331
331
332 def heads(self):
332 def heads(self):
333 return self._repo.heads()
333 return self._repo.heads()
334
334
335 def known(self, nodes):
335 def known(self, nodes):
336 return self._repo.known(nodes)
336 return self._repo.known(nodes)
337
337
338 def listkeys(self, namespace):
338 def listkeys(self, namespace):
339 return self._repo.listkeys(namespace)
339 return self._repo.listkeys(namespace)
340
340
341 def lookup(self, key):
341 def lookup(self, key):
342 return self._repo.lookup(key)
342 return self._repo.lookup(key)
343
343
344 def pushkey(self, namespace, key, old, new):
344 def pushkey(self, namespace, key, old, new):
345 return self._repo.pushkey(namespace, key, old, new)
345 return self._repo.pushkey(namespace, key, old, new)
346
346
347 def stream_out(self):
347 def stream_out(self):
348 raise error.Abort(_(b'cannot perform stream clone against local peer'))
348 raise error.Abort(_(b'cannot perform stream clone against local peer'))
349
349
350 def unbundle(self, bundle, heads, url):
350 def unbundle(self, bundle, heads, url):
351 """apply a bundle on a repo
351 """apply a bundle on a repo
352
352
353 This function handles the repo locking itself."""
353 This function handles the repo locking itself."""
354 try:
354 try:
355 try:
355 try:
356 bundle = exchange.readbundle(self.ui, bundle, None)
356 bundle = exchange.readbundle(self.ui, bundle, None)
357 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
357 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
358 if util.safehasattr(ret, b'getchunks'):
358 if util.safehasattr(ret, b'getchunks'):
359 # This is a bundle20 object, turn it into an unbundler.
359 # This is a bundle20 object, turn it into an unbundler.
360 # This little dance should be dropped eventually when the
360 # This little dance should be dropped eventually when the
361 # API is finally improved.
361 # API is finally improved.
362 stream = util.chunkbuffer(ret.getchunks())
362 stream = util.chunkbuffer(ret.getchunks())
363 ret = bundle2.getunbundler(self.ui, stream)
363 ret = bundle2.getunbundler(self.ui, stream)
364 return ret
364 return ret
365 except Exception as exc:
365 except Exception as exc:
366 # If the exception contains output salvaged from a bundle2
366 # If the exception contains output salvaged from a bundle2
367 # reply, we need to make sure it is printed before continuing
367 # reply, we need to make sure it is printed before continuing
368 # to fail. So we build a bundle2 with such output and consume
368 # to fail. So we build a bundle2 with such output and consume
369 # it directly.
369 # it directly.
370 #
370 #
371 # This is not very elegant but allows a "simple" solution for
371 # This is not very elegant but allows a "simple" solution for
372 # issue4594
372 # issue4594
373 output = getattr(exc, '_bundle2salvagedoutput', ())
373 output = getattr(exc, '_bundle2salvagedoutput', ())
374 if output:
374 if output:
375 bundler = bundle2.bundle20(self._repo.ui)
375 bundler = bundle2.bundle20(self._repo.ui)
376 for out in output:
376 for out in output:
377 bundler.addpart(out)
377 bundler.addpart(out)
378 stream = util.chunkbuffer(bundler.getchunks())
378 stream = util.chunkbuffer(bundler.getchunks())
379 b = bundle2.getunbundler(self.ui, stream)
379 b = bundle2.getunbundler(self.ui, stream)
380 bundle2.processbundle(self._repo, b)
380 bundle2.processbundle(self._repo, b)
381 raise
381 raise
382 except error.PushRaced as exc:
382 except error.PushRaced as exc:
383 raise error.ResponseError(
383 raise error.ResponseError(
384 _(b'push failed:'), stringutil.forcebytestr(exc)
384 _(b'push failed:'), stringutil.forcebytestr(exc)
385 )
385 )
386
386
387 # End of _basewirecommands interface.
387 # End of _basewirecommands interface.
388
388
389 # Begin of peer interface.
389 # Begin of peer interface.
390
390
391 def commandexecutor(self):
391 def commandexecutor(self):
392 return localcommandexecutor(self)
392 return localcommandexecutor(self)
393
393
394 # End of peer interface.
394 # End of peer interface.
395
395
396
396
397 @interfaceutil.implementer(repository.ipeerlegacycommands)
397 @interfaceutil.implementer(repository.ipeerlegacycommands)
398 class locallegacypeer(localpeer):
398 class locallegacypeer(localpeer):
399 '''peer extension which implements legacy methods too; used for tests with
399 '''peer extension which implements legacy methods too; used for tests with
400 restricted capabilities'''
400 restricted capabilities'''
401
401
402 def __init__(self, repo):
402 def __init__(self, repo):
403 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
403 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
404
404
405 # Begin of baselegacywirecommands interface.
405 # Begin of baselegacywirecommands interface.
406
406
407 def between(self, pairs):
407 def between(self, pairs):
408 return self._repo.between(pairs)
408 return self._repo.between(pairs)
409
409
410 def branches(self, nodes):
410 def branches(self, nodes):
411 return self._repo.branches(nodes)
411 return self._repo.branches(nodes)
412
412
413 def changegroup(self, nodes, source):
413 def changegroup(self, nodes, source):
414 outgoing = discovery.outgoing(
414 outgoing = discovery.outgoing(
415 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
415 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
416 )
416 )
417 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
417 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
418
418
419 def changegroupsubset(self, bases, heads, source):
419 def changegroupsubset(self, bases, heads, source):
420 outgoing = discovery.outgoing(
420 outgoing = discovery.outgoing(
421 self._repo, missingroots=bases, ancestorsof=heads
421 self._repo, missingroots=bases, ancestorsof=heads
422 )
422 )
423 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
423 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
424
424
425 # End of baselegacywirecommands interface.
425 # End of baselegacywirecommands interface.
426
426
427
427
428 # Increment the sub-version when the revlog v2 format changes to lock out old
428 # Increment the sub-version when the revlog v2 format changes to lock out old
429 # clients.
429 # clients.
430 REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'
430 REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'
431
431
432 # A repository with the sparserevlog feature will have delta chains that
432 # A repository with the sparserevlog feature will have delta chains that
433 # can spread over a larger span. Sparse reading cuts these large spans into
433 # can spread over a larger span. Sparse reading cuts these large spans into
434 # pieces, so that each piece isn't too big.
434 # pieces, so that each piece isn't too big.
435 # Without the sparserevlog capability, reading from the repository could use
435 # Without the sparserevlog capability, reading from the repository could use
436 # huge amounts of memory, because the whole span would be read at once,
436 # huge amounts of memory, because the whole span would be read at once,
437 # including all the intermediate revisions that aren't pertinent for the chain.
437 # including all the intermediate revisions that aren't pertinent for the chain.
438 # This is why once a repository has enabled sparse-read, it becomes required.
438 # This is why once a repository has enabled sparse-read, it becomes required.
439 SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
439 SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
440
440
441 # A repository with the sidedataflag requirement will allow storing extra
441 # A repository with the sidedataflag requirement will allow storing extra
442 # information for revisions without altering their original hashes.
442 # information for revisions without altering their original hashes.
443 SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'
443 SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'
444
444
445 # A repository with the copies-sidedata-changeset requirement will store
445 # A repository with the copies-sidedata-changeset requirement will store
446 # copies related information in changeset's sidedata.
446 # copies related information in changeset's sidedata.
447 COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
447 COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
448
448
449 # The repository uses a persistent nodemap for the changelog and the manifest.
449 # The repository uses a persistent nodemap for the changelog and the manifest.
450 NODEMAP_REQUIREMENT = b'persistent-nodemap'
450 NODEMAP_REQUIREMENT = b'persistent-nodemap'
451
451
452 # Functions receiving (ui, features) that extensions can register to impact
452 # Functions receiving (ui, features) that extensions can register to impact
453 # the ability to load repositories with custom requirements. Only
453 # the ability to load repositories with custom requirements. Only
454 # functions defined in loaded extensions are called.
454 # functions defined in loaded extensions are called.
455 #
455 #
456 # The function receives a set of requirement strings that the repository
456 # The function receives a set of requirement strings that the repository
457 # is capable of opening. Functions will typically add elements to the
457 # is capable of opening. Functions will typically add elements to the
458 # set to reflect that the extension knows how to handle those requirements.
458 # set to reflect that the extension knows how to handle those requirements.
459 featuresetupfuncs = set()
459 featuresetupfuncs = set()
460
460
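# Illustrative sketch (not part of the original file): an extension would
# typically register a feature setup function from its ``uisetup``; the
# requirement name below is made up:
#
#     from mercurial import localrepo
#
#     def featuresetup(ui, supported):
#         supported.add(b'exp-myextension-requirement')
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)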
461
461
462 def _getsharedvfs(hgvfs, requirements):
462 def _getsharedvfs(hgvfs, requirements):
463 """ returns the vfs object pointing to root of shared source
463 """ returns the vfs object pointing to root of shared source
464 repo for a shared repository
464 repo for a shared repository
465
465
466 hgvfs is vfs pointing at .hg/ of current repo (shared one)
466 hgvfs is vfs pointing at .hg/ of current repo (shared one)
467 requirements is a set of requirements of current repo (shared one)
467 requirements is a set of requirements of current repo (shared one)
468 """
468 """
469 # The ``shared`` or ``relshared`` requirements indicate the
469 # The ``shared`` or ``relshared`` requirements indicate the
470 # store lives in the path contained in the ``.hg/sharedpath`` file.
470 # store lives in the path contained in the ``.hg/sharedpath`` file.
471 # This is an absolute path for ``shared`` and relative to
471 # This is an absolute path for ``shared`` and relative to
472 # ``.hg/`` for ``relshared``.
472 # ``.hg/`` for ``relshared``.
473 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
473 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
474 if b'relshared' in requirements:
474 if b'relshared' in requirements:
475 sharedpath = hgvfs.join(sharedpath)
475 sharedpath = hgvfs.join(sharedpath)
476
476
477 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
477 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
478
478
479 if not sharedvfs.exists():
479 if not sharedvfs.exists():
480 raise error.RepoError(
480 raise error.RepoError(
481 _(b'.hg/sharedpath points to nonexistent directory %s')
481 _(b'.hg/sharedpath points to nonexistent directory %s')
482 % sharedvfs.base
482 % sharedvfs.base
483 )
483 )
484 return sharedvfs
484 return sharedvfs
485
485
486
486
487 def _readrequires(vfs, allowmissing):
487 def _readrequires(vfs, allowmissing):
488 """ reads the require file present at root of this vfs
488 """ reads the require file present at root of this vfs
489 and return a set of requirements
489 and return a set of requirements
490
490
491 If allowmissing is True, we suppress ENOENT if raised"""
491 If allowmissing is True, we suppress ENOENT if raised"""
492 # requires file contains a newline-delimited list of
492 # requires file contains a newline-delimited list of
493 # features/capabilities the opener (us) must have in order to use
493 # features/capabilities the opener (us) must have in order to use
494 # the repository. This file was introduced in Mercurial 0.9.2,
494 # the repository. This file was introduced in Mercurial 0.9.2,
495 # which means very old repositories may not have one. We assume
495 # which means very old repositories may not have one. We assume
496 # a missing file translates to no requirements.
496 # a missing file translates to no requirements.
497 try:
497 try:
498 requirements = set(vfs.read(b'requires').splitlines())
498 requirements = set(vfs.read(b'requires').splitlines())
499 except IOError as e:
499 except IOError as e:
500 if not (allowmissing and e.errno == errno.ENOENT):
500 if not (allowmissing and e.errno == errno.ENOENT):
501 raise
501 raise
502 requirements = set()
502 requirements = set()
503 return requirements
503 return requirements
504
504
505
505
506 def makelocalrepository(baseui, path, intents=None):
506 def makelocalrepository(baseui, path, intents=None):
507 """Create a local repository object.
507 """Create a local repository object.
508
508
509 Given arguments needed to construct a local repository, this function
509 Given arguments needed to construct a local repository, this function
510 performs various early repository loading tasks (such as
510 performs various early repository loading tasks (such as
511 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
511 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
512 the repository can be opened, derives a type suitable for representing
512 the repository can be opened, derives a type suitable for representing
513 that repository, and returns an instance of it.
513 that repository, and returns an instance of it.
514
514
515 The returned object conforms to the ``repository.completelocalrepository``
515 The returned object conforms to the ``repository.completelocalrepository``
516 interface.
516 interface.
517
517
518 The repository type is derived by calling a series of factory functions
518 The repository type is derived by calling a series of factory functions
519 for each aspect/interface of the final repository. These are defined by
519 for each aspect/interface of the final repository. These are defined by
520 ``REPO_INTERFACES``.
520 ``REPO_INTERFACES``.
521
521
522 Each factory function is called to produce a type implementing a specific
522 Each factory function is called to produce a type implementing a specific
523 interface. The cumulative list of returned types will be combined into a
523 interface. The cumulative list of returned types will be combined into a
524 new type and that type will be instantiated to represent the local
524 new type and that type will be instantiated to represent the local
525 repository.
525 repository.
526
526
527 The factory functions each receive various state that may be consulted
527 The factory functions each receive various state that may be consulted
528 as part of deriving a type.
528 as part of deriving a type.
529
529
530 Extensions should wrap these factory functions to customize repository type
530 Extensions should wrap these factory functions to customize repository type
531 creation. Note that an extension's wrapped function may be called even if
531 creation. Note that an extension's wrapped function may be called even if
532 that extension is not loaded for the repo being constructed. Extensions
532 that extension is not loaded for the repo being constructed. Extensions
533 should check if their ``__name__`` appears in the
533 should check if their ``__name__`` appears in the
534 ``extensionmodulenames`` set passed to the factory function and no-op if
534 ``extensionmodulenames`` set passed to the factory function and no-op if
535 not.
535 not.
536 """
536 """
537 ui = baseui.copy()
537 ui = baseui.copy()
538 # Prevent copying repo configuration.
538 # Prevent copying repo configuration.
539 ui.copy = baseui.copy
539 ui.copy = baseui.copy
540
540
541 # Working directory VFS rooted at repository root.
541 # Working directory VFS rooted at repository root.
542 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
542 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
543
543
544 # Main VFS for .hg/ directory.
544 # Main VFS for .hg/ directory.
545 hgpath = wdirvfs.join(b'.hg')
545 hgpath = wdirvfs.join(b'.hg')
546 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
546 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
547 # Whether this repository is a shared one or not
547 # Whether this repository is a shared one or not
548 shared = False
548 shared = False
549 # If this repository is shared, vfs pointing to shared repo
549 # If this repository is shared, vfs pointing to shared repo
550 sharedvfs = None
550 sharedvfs = None
551
551
552 # The .hg/ path should exist and should be a directory. All other
552 # The .hg/ path should exist and should be a directory. All other
553 # cases are errors.
553 # cases are errors.
554 if not hgvfs.isdir():
554 if not hgvfs.isdir():
555 try:
555 try:
556 hgvfs.stat()
556 hgvfs.stat()
557 except OSError as e:
557 except OSError as e:
558 if e.errno != errno.ENOENT:
558 if e.errno != errno.ENOENT:
559 raise
559 raise
560 except ValueError as e:
560 except ValueError as e:
561 # Can be raised on Python 3.8 when path is invalid.
561 # Can be raised on Python 3.8 when path is invalid.
562 raise error.Abort(
562 raise error.Abort(
563 _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
563 _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
564 )
564 )
565
565
566 raise error.RepoError(_(b'repository %s not found') % path)
566 raise error.RepoError(_(b'repository %s not found') % path)
567
567
568 requirements = _readrequires(hgvfs, True)
568 requirements = _readrequires(hgvfs, True)
569
569
570 # The .hg/hgrc file may load extensions or contain config options
570 # The .hg/hgrc file may load extensions or contain config options
571 # that influence repository construction. Attempt to load it and
571 # that influence repository construction. Attempt to load it and
572 # process any new extensions that it may have pulled in.
572 # process any new extensions that it may have pulled in.
573 if loadhgrc(ui, wdirvfs, hgvfs, requirements):
573 if loadhgrc(ui, wdirvfs, hgvfs, requirements):
574 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
574 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
575 extensions.loadall(ui)
575 extensions.loadall(ui)
576 extensions.populateui(ui)
576 extensions.populateui(ui)
577
577
578 # Set of module names of extensions loaded for this repository.
578 # Set of module names of extensions loaded for this repository.
579 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
579 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
580
580
581 supportedrequirements = gathersupportedrequirements(ui)
581 supportedrequirements = gathersupportedrequirements(ui)
582
582
583 # We first validate the requirements are known.
583 # We first validate the requirements are known.
584 ensurerequirementsrecognized(requirements, supportedrequirements)
584 ensurerequirementsrecognized(requirements, supportedrequirements)
585
585
586 # Then we validate that the known set is reasonable to use together.
586 # Then we validate that the known set is reasonable to use together.
587 ensurerequirementscompatible(ui, requirements)
587 ensurerequirementscompatible(ui, requirements)
588
588
589 # TODO there are unhandled edge cases related to opening repositories with
589 # TODO there are unhandled edge cases related to opening repositories with
590 # shared storage. If storage is shared, we should also test for requirements
590 # shared storage. If storage is shared, we should also test for requirements
591 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
591 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
592 # that repo, as that repo may load extensions needed to open it. This is a
592 # that repo, as that repo may load extensions needed to open it. This is a
593 # bit complicated because we don't want the other hgrc to overwrite settings
593 # bit complicated because we don't want the other hgrc to overwrite settings
594 # in this hgrc.
594 # in this hgrc.
595 #
595 #
596 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
596 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
597 # file when sharing repos. But if a requirement is added after the share is
597 # file when sharing repos. But if a requirement is added after the share is
598 # performed, thereby introducing a new requirement for the opener, we may
598 # performed, thereby introducing a new requirement for the opener, we may
599 # not see that and could encounter a run-time error interacting with
599 # not see that and could encounter a run-time error interacting with
600 # that shared store since it has an unknown-to-us requirement.
600 # that shared store since it has an unknown-to-us requirement.
601
601
602 # At this point, we know we should be capable of opening the repository.
602 # At this point, we know we should be capable of opening the repository.
603 # Now get on with doing that.
603 # Now get on with doing that.
604
604
605 features = set()
605 features = set()
606
606
607 # The "store" part of the repository holds versioned data. How it is
607 # The "store" part of the repository holds versioned data. How it is
608 # accessed is determined by various requirements. If `shared` or
608 # accessed is determined by various requirements. If `shared` or
609 # `relshared` requirements are present, this indicates the current repository
609 # `relshared` requirements are present, this indicates the current repository
610 # is a share and the store exists in the path mentioned in `.hg/sharedpath`
610 # is a share and the store exists in the path mentioned in `.hg/sharedpath`
611 shared = b'shared' in requirements or b'relshared' in requirements
611 shared = b'shared' in requirements or b'relshared' in requirements
612 if shared:
612 if shared:
613 sharedvfs = _getsharedvfs(hgvfs, requirements)
613 sharedvfs = _getsharedvfs(hgvfs, requirements)
614 storebasepath = sharedvfs.base
614 storebasepath = sharedvfs.base
615 cachepath = sharedvfs.join(b'cache')
615 cachepath = sharedvfs.join(b'cache')
616 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
616 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
617 else:
617 else:
618 storebasepath = hgvfs.base
618 storebasepath = hgvfs.base
619 cachepath = hgvfs.join(b'cache')
619 cachepath = hgvfs.join(b'cache')
620 wcachepath = hgvfs.join(b'wcache')
620 wcachepath = hgvfs.join(b'wcache')
621
621
622 # The store has changed over time and the exact layout is dictated by
622 # The store has changed over time and the exact layout is dictated by
623 # requirements. The store interface abstracts differences across all
623 # requirements. The store interface abstracts differences across all
624 # of them.
624 # of them.
625 store = makestore(
625 store = makestore(
626 requirements,
626 requirements,
627 storebasepath,
627 storebasepath,
628 lambda base: vfsmod.vfs(base, cacheaudited=True),
628 lambda base: vfsmod.vfs(base, cacheaudited=True),
629 )
629 )
630 hgvfs.createmode = store.createmode
630 hgvfs.createmode = store.createmode
631
631
632 storevfs = store.vfs
632 storevfs = store.vfs
633 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
633 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
634
634
635 # The cache vfs is used to manage cache files.
635 # The cache vfs is used to manage cache files.
636 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
636 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
637 cachevfs.createmode = store.createmode
637 cachevfs.createmode = store.createmode
638 # The cache vfs is used to manage cache files related to the working copy
638 # The cache vfs is used to manage cache files related to the working copy
639 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
639 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
640 wcachevfs.createmode = store.createmode
640 wcachevfs.createmode = store.createmode
641
641
642 # Now resolve the type for the repository object. We do this by repeatedly
642 # Now resolve the type for the repository object. We do this by repeatedly
643 # calling a factory function to produce types for specific aspects of the
643 # calling a factory function to produce types for specific aspects of the
644 # repo's operation. The aggregate returned types are used as base classes
644 # repo's operation. The aggregate returned types are used as base classes
645 # for a dynamically-derived type, which will represent our new repository.
645 # for a dynamically-derived type, which will represent our new repository.
646
646
647 bases = []
647 bases = []
648 extrastate = {}
648 extrastate = {}
649
649
650 for iface, fn in REPO_INTERFACES:
650 for iface, fn in REPO_INTERFACES:
651 # We pass all potentially useful state to give extensions tons of
651 # We pass all potentially useful state to give extensions tons of
652 # flexibility.
652 # flexibility.
653 typ = fn()(
653 typ = fn()(
654 ui=ui,
654 ui=ui,
655 intents=intents,
655 intents=intents,
656 requirements=requirements,
656 requirements=requirements,
657 features=features,
657 features=features,
658 wdirvfs=wdirvfs,
658 wdirvfs=wdirvfs,
659 hgvfs=hgvfs,
659 hgvfs=hgvfs,
660 store=store,
660 store=store,
661 storevfs=storevfs,
661 storevfs=storevfs,
662 storeoptions=storevfs.options,
662 storeoptions=storevfs.options,
663 cachevfs=cachevfs,
663 cachevfs=cachevfs,
664 wcachevfs=wcachevfs,
664 wcachevfs=wcachevfs,
665 extensionmodulenames=extensionmodulenames,
665 extensionmodulenames=extensionmodulenames,
666 extrastate=extrastate,
666 extrastate=extrastate,
667 baseclasses=bases,
667 baseclasses=bases,
668 )
668 )
669
669
670 if not isinstance(typ, type):
670 if not isinstance(typ, type):
671 raise error.ProgrammingError(
671 raise error.ProgrammingError(
672 b'unable to construct type for %s' % iface
672 b'unable to construct type for %s' % iface
673 )
673 )
674
674
675 bases.append(typ)
675 bases.append(typ)
676
676
677 # type() allows you to use characters in type names that wouldn't be
677 # type() allows you to use characters in type names that wouldn't be
678 # recognized as Python symbols in source code. We abuse that to add
678 # recognized as Python symbols in source code. We abuse that to add
679 # rich information about our constructed repo.
679 # rich information about our constructed repo.
680 name = pycompat.sysstr(
680 name = pycompat.sysstr(
681 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
681 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
682 )
682 )
683
683
684 cls = type(name, tuple(bases), {})
684 cls = type(name, tuple(bases), {})
685
685
686 return cls(
686 return cls(
687 baseui=baseui,
687 baseui=baseui,
688 ui=ui,
688 ui=ui,
689 origroot=path,
689 origroot=path,
690 wdirvfs=wdirvfs,
690 wdirvfs=wdirvfs,
691 hgvfs=hgvfs,
691 hgvfs=hgvfs,
692 requirements=requirements,
692 requirements=requirements,
693 supportedrequirements=supportedrequirements,
693 supportedrequirements=supportedrequirements,
694 sharedpath=storebasepath,
694 sharedpath=storebasepath,
695 store=store,
695 store=store,
696 cachevfs=cachevfs,
696 cachevfs=cachevfs,
697 wcachevfs=wcachevfs,
697 wcachevfs=wcachevfs,
698 features=features,
698 features=features,
699 intents=intents,
699 intents=intents,
700 )
700 )
701
701
702
702
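# Illustrative sketch (not part of the original file): per the docstring of
# ``makelocalrepository()`` above, extensions customize repository type
# creation by wrapping the ``REPO_INTERFACES`` factory functions. Because a
# wrapper can run even for repos where the extension is not enabled, it
# should check ``extensionmodulenames`` first. ``makemain`` is one of the
# factory functions in this module; the mixin subclass is made up:
#
#     from mercurial import extensions, localrepo
#
#     def wrapmakemain(orig, **kwargs):
#         cls = orig(**kwargs)
#         if __name__ not in kwargs['extensionmodulenames']:
#             return cls
#
#         class extendedrepo(cls):
#             def myfeature(self):
#                 return True
#
#         return extendedrepo
#
#     extensions.wrapfunction(localrepo, 'makemain', wrapmakemain)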
703 def loadhgrc(ui, wdirvfs, hgvfs, requirements):
703 def loadhgrc(ui, wdirvfs, hgvfs, requirements):
704 """Load hgrc files/content into a ui instance.
704 """Load hgrc files/content into a ui instance.
705
705
706 This is called during repository opening to load any additional
706 This is called during repository opening to load any additional
707 config files or settings relevant to the current repository.
707 config files or settings relevant to the current repository.
708
708
709 Returns a bool indicating whether any additional configs were loaded.
709 Returns a bool indicating whether any additional configs were loaded.
710
710
711 Extensions should monkeypatch this function to modify how per-repo
711 Extensions should monkeypatch this function to modify how per-repo
712 configs are loaded. For example, an extension may wish to pull in
712 configs are loaded. For example, an extension may wish to pull in
713 configs from alternate files or sources.
713 configs from alternate files or sources.
714 """
714 """
715 if not rcutil.use_repo_hgrc():
715 if not rcutil.use_repo_hgrc():
716 return False
716 return False
717 try:
717 try:
718 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
718 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
719 return True
719 return True
720 except IOError:
720 except IOError:
721 return False
721 return False
722
722
723
723
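# Illustrative sketch (not part of the original file): the docstring above
# invites extensions to monkeypatch ``loadhgrc``; the conventional way is
# ``extensions.wrapfunction``. The extra config file name is made up:
#
#     from mercurial import extensions, localrepo
#
#     def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements):
#         loaded = orig(ui, wdirvfs, hgvfs, requirements)
#         try:
#             ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
#             return True
#         except IOError:
#             return loaded
#
#     extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)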
724 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
724 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
725 """Perform additional actions after .hg/hgrc is loaded.
725 """Perform additional actions after .hg/hgrc is loaded.
726
726
727 This function is called during repository loading immediately after
727 This function is called during repository loading immediately after
728 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
728 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
729
729
730 The function can be used to validate configs, automatically add
730 The function can be used to validate configs, automatically add
731 options (including extensions) based on requirements, etc.
731 options (including extensions) based on requirements, etc.
732 """
732 """
733
733
734 # Map of requirements to list of extensions to load automatically when
734 # Map of requirements to list of extensions to load automatically when
735 # requirement is present.
735 # requirement is present.
736 autoextensions = {
736 autoextensions = {
737 b'git': [b'git'],
737 b'git': [b'git'],
738 b'largefiles': [b'largefiles'],
738 b'largefiles': [b'largefiles'],
739 b'lfs': [b'lfs'],
739 b'lfs': [b'lfs'],
740 }
740 }
741
741
742 for requirement, names in sorted(autoextensions.items()):
742 for requirement, names in sorted(autoextensions.items()):
743 if requirement not in requirements:
743 if requirement not in requirements:
744 continue
744 continue
745
745
746 for name in names:
746 for name in names:
747 if not ui.hasconfig(b'extensions', name):
747 if not ui.hasconfig(b'extensions', name):
748 ui.setconfig(b'extensions', name, b'', source=b'autoload')
748 ui.setconfig(b'extensions', name, b'', source=b'autoload')
749
749
750
750
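
# Editor's illustration (not part of this changeset): a repo whose
# requirements include b'lfs' gets the lfs extension enabled even when no
# config file mentions it. The fakeui class is a hypothetical stand-in
# for ui.ui covering only the two methods afterhgrcload() calls.
def _example_autoload():
    class fakeui(object):
        def __init__(self):
            self.exts = {}

        def hasconfig(self, section, name):
            return name in self.exts

        def setconfig(self, section, name, value, source=b''):
            self.exts[name] = (value, source)

    ui = fakeui()
    afterhgrcload(ui, None, None, {b'lfs', b'store', b'fncache'})
    assert ui.exts[b'lfs'] == (b'', b'autoload')
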
751 def gathersupportedrequirements(ui):
751 def gathersupportedrequirements(ui):
752 """Determine the complete set of recognized requirements."""
752 """Determine the complete set of recognized requirements."""
753 # Start with all requirements supported by this file.
753 # Start with all requirements supported by this file.
754 supported = set(localrepository._basesupported)
754 supported = set(localrepository._basesupported)
755
755
756 # Execute ``featuresetupfuncs`` entries if they belong to an extension
756 # Execute ``featuresetupfuncs`` entries if they belong to an extension
757 # relevant to this ui instance.
757 # relevant to this ui instance.
758 modules = {m.__name__ for n, m in extensions.extensions(ui)}
758 modules = {m.__name__ for n, m in extensions.extensions(ui)}
759
759
760 for fn in featuresetupfuncs:
760 for fn in featuresetupfuncs:
761 if fn.__module__ in modules:
761 if fn.__module__ in modules:
762 fn(ui, supported)
762 fn(ui, supported)
763
763
764 # Add derived requirements from registered compression engines.
764 # Add derived requirements from registered compression engines.
765 for name in util.compengines:
765 for name in util.compengines:
766 engine = util.compengines[name]
766 engine = util.compengines[name]
767 if engine.available() and engine.revlogheader():
767 if engine.available() and engine.revlogheader():
768 supported.add(b'exp-compression-%s' % name)
768 supported.add(b'exp-compression-%s' % name)
769 if engine.name() == b'zstd':
769 if engine.name() == b'zstd':
770 supported.add(b'revlog-compression-zstd')
770 supported.add(b'revlog-compression-zstd')
771
771
772 return supported
772 return supported
773
773
774
774
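
# Editor's sketch (not part of this changeset): how an extension
# advertises an extra requirement so gathersupportedrequirements()
# accepts repositories that use it. The requirement name is hypothetical;
# the callback only fires when the registering module is loaded as an
# extension for the current ui. This belongs in the extension's module:
#
#   def featuresetup(ui, supported):
#       supported.add(b'exp-myfeature')
#
#   def extsetup(ui):
#       from mercurial import localrepo
#       localrepo.featuresetupfuncs.add(featuresetup)
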
775 def ensurerequirementsrecognized(requirements, supported):
775 def ensurerequirementsrecognized(requirements, supported):
776 """Validate that a set of local requirements is recognized.
776 """Validate that a set of local requirements is recognized.
777
777
778 Receives a set of requirements. Raises an ``error.RepoError`` if there
778 Receives a set of requirements. Raises an ``error.RepoError`` if there
779 exists any requirement in that set that currently loaded code doesn't
779 exists any requirement in that set that currently loaded code doesn't
780 recognize.
780 recognize.
781
781
782 Returns a set of supported requirements.
782 Returns a set of supported requirements.
783 """
783 """
784 missing = set()
784 missing = set()
785
785
786 for requirement in requirements:
786 for requirement in requirements:
787 if requirement in supported:
787 if requirement in supported:
788 continue
788 continue
789
789
790 if not requirement or not requirement[0:1].isalnum():
790 if not requirement or not requirement[0:1].isalnum():
791 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
791 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
792
792
793 missing.add(requirement)
793 missing.add(requirement)
794
794
795 if missing:
795 if missing:
796 raise error.RequirementError(
796 raise error.RequirementError(
797 _(b'repository requires features unknown to this Mercurial: %s')
797 _(b'repository requires features unknown to this Mercurial: %s')
798 % b' '.join(sorted(missing)),
798 % b' '.join(sorted(missing)),
799 hint=_(
799 hint=_(
800 b'see https://mercurial-scm.org/wiki/MissingRequirement '
800 b'see https://mercurial-scm.org/wiki/MissingRequirement '
801 b'for more information'
801 b'for more information'
802 ),
802 ),
803 )
803 )
804
804
805
805
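
# Editor's usage sketch (not part of this changeset): well-formed but
# unknown requirements raise RequirementError naming them, while a
# requirement that does not start with an alphanumeric byte is reported
# as a corrupt .hg/requires file. b'frobnicate' is a made-up name.
def _example_requirements_check():
    supported = {b'store', b'fncache'}
    ensurerequirementsrecognized({b'store'}, supported)  # all recognized
    try:
        ensurerequirementsrecognized({b'frobnicate'}, supported)
    except error.RequirementError:
        pass  # unknown requirement -> "requires features unknown ..."
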
806 def ensurerequirementscompatible(ui, requirements):
806 def ensurerequirementscompatible(ui, requirements):
807 """Validates that a set of recognized requirements is mutually compatible.
807 """Validates that a set of recognized requirements is mutually compatible.
808
808
809 Some requirements may not be compatible with others or require
809 Some requirements may not be compatible with others or require
810 config options that aren't enabled. This function is called during
810 config options that aren't enabled. This function is called during
811 repository opening to ensure that the set of requirements needed
811 repository opening to ensure that the set of requirements needed
812 to open a repository is sane and compatible with config options.
812 to open a repository is sane and compatible with config options.
813
813
814 Extensions can monkeypatch this function to perform additional
814 Extensions can monkeypatch this function to perform additional
815 checking.
815 checking.
816
816
817 ``error.RepoError`` should be raised on failure.
817 ``error.RepoError`` should be raised on failure.
818 """
818 """
819 if repository.SPARSE_REQUIREMENT in requirements and not sparse.enabled:
819 if repository.SPARSE_REQUIREMENT in requirements and not sparse.enabled:
820 raise error.RepoError(
820 raise error.RepoError(
821 _(
821 _(
822 b'repository is using sparse feature but '
822 b'repository is using sparse feature but '
823 b'sparse is not enabled; enable the '
823 b'sparse is not enabled; enable the '
824 b'"sparse" extension to access'
824 b'"sparse" extension to access'
825 )
825 )
826 )
826 )
827
827
828
828
829 def makestore(requirements, path, vfstype):
829 def makestore(requirements, path, vfstype):
830 """Construct a storage object for a repository."""
830 """Construct a storage object for a repository."""
831 if b'store' in requirements:
831 if b'store' in requirements:
832 if b'fncache' in requirements:
832 if b'fncache' in requirements:
833 return storemod.fncachestore(
833 return storemod.fncachestore(
834 path, vfstype, b'dotencode' in requirements
834 path, vfstype, b'dotencode' in requirements
835 )
835 )
836
836
837 return storemod.encodedstore(path, vfstype)
837 return storemod.encodedstore(path, vfstype)
838
838
839 return storemod.basicstore(path, vfstype)
839 return storemod.basicstore(path, vfstype)
840
840
841
841
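
# Editor's illustration (not part of this changeset): the store selection
# above is a three-way dispatch on requirements; this standalone mirror
# makes the precedence explicit (fncache implies store; dotencode only
# refines the fncachestore filename encoding).
def _example_storekind(requirements):
    if b'store' in requirements:
        if b'fncache' in requirements:
            return b'fncachestore'
        return b'encodedstore'
    return b'basicstore'

# e.g. _example_storekind({b'store', b'fncache', b'dotencode'})
# evaluates to b'fncachestore'
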
842 def resolvestorevfsoptions(ui, requirements, features):
842 def resolvestorevfsoptions(ui, requirements, features):
843 """Resolve the options to pass to the store vfs opener.
843 """Resolve the options to pass to the store vfs opener.
844
844
845 The returned dict is used to influence behavior of the storage layer.
845 The returned dict is used to influence behavior of the storage layer.
846 """
846 """
847 options = {}
847 options = {}
848
848
849 if b'treemanifest' in requirements:
849 if b'treemanifest' in requirements:
850 options[b'treemanifest'] = True
850 options[b'treemanifest'] = True
851
851
852 # experimental config: format.manifestcachesize
852 # experimental config: format.manifestcachesize
853 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
853 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
854 if manifestcachesize is not None:
854 if manifestcachesize is not None:
855 options[b'manifestcachesize'] = manifestcachesize
855 options[b'manifestcachesize'] = manifestcachesize
856
856
857 # In the absence of another requirement superseding a revlog-related
857 # In the absence of another requirement superseding a revlog-related
858 # requirement, we have to assume the repo is using revlog version 0.
858 # requirement, we have to assume the repo is using revlog version 0.
859 # This revlog format is super old and we don't bother trying to parse
859 # This revlog format is super old and we don't bother trying to parse
860 # opener options for it because those options wouldn't do anything
860 # opener options for it because those options wouldn't do anything
861 # meaningful on such old repos.
861 # meaningful on such old repos.
862 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
862 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
863 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
863 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
864 else: # explicitly mark repo as using revlogv0
864 else: # explicitly mark repo as using revlogv0
865 options[b'revlogv0'] = True
865 options[b'revlogv0'] = True
866
866
867 if COPIESSDC_REQUIREMENT in requirements:
867 if COPIESSDC_REQUIREMENT in requirements:
868 options[b'copies-storage'] = b'changeset-sidedata'
868 options[b'copies-storage'] = b'changeset-sidedata'
869 else:
869 else:
870 writecopiesto = ui.config(b'experimental', b'copies.write-to')
870 writecopiesto = ui.config(b'experimental', b'copies.write-to')
871 copiesextramode = (b'changeset-only', b'compatibility')
871 copiesextramode = (b'changeset-only', b'compatibility')
872 if writecopiesto in copiesextramode:
872 if writecopiesto in copiesextramode:
873 options[b'copies-storage'] = b'extra'
873 options[b'copies-storage'] = b'extra'
874
874
875 return options
875 return options
876
876
877
877
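
# Editor's illustration (not part of this changeset): the copies-storage
# decision above in brief. The sidedata requirement wins outright;
# otherwise the experimental write-to config can route copy metadata into
# the changeset extras; anything else keeps the default filelog storage.
def _example_copiesstorage(requirements, writecopiesto):
    if COPIESSDC_REQUIREMENT in requirements:
        return b'changeset-sidedata'
    if writecopiesto in (b'changeset-only', b'compatibility'):
        return b'extra'
    return None  # default: copies tracked in filelog metadata
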
878 def resolverevlogstorevfsoptions(ui, requirements, features):
878 def resolverevlogstorevfsoptions(ui, requirements, features):
879 """Resolve opener options specific to revlogs."""
879 """Resolve opener options specific to revlogs."""
880
880
881 options = {}
881 options = {}
882 options[b'flagprocessors'] = {}
882 options[b'flagprocessors'] = {}
883
883
884 if b'revlogv1' in requirements:
884 if b'revlogv1' in requirements:
885 options[b'revlogv1'] = True
885 options[b'revlogv1'] = True
886 if REVLOGV2_REQUIREMENT in requirements:
886 if REVLOGV2_REQUIREMENT in requirements:
887 options[b'revlogv2'] = True
887 options[b'revlogv2'] = True
888
888
889 if b'generaldelta' in requirements:
889 if b'generaldelta' in requirements:
890 options[b'generaldelta'] = True
890 options[b'generaldelta'] = True
891
891
892 # experimental config: format.chunkcachesize
892 # experimental config: format.chunkcachesize
893 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
893 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
894 if chunkcachesize is not None:
894 if chunkcachesize is not None:
895 options[b'chunkcachesize'] = chunkcachesize
895 options[b'chunkcachesize'] = chunkcachesize
896
896
897 deltabothparents = ui.configbool(
897 deltabothparents = ui.configbool(
898 b'storage', b'revlog.optimize-delta-parent-choice'
898 b'storage', b'revlog.optimize-delta-parent-choice'
899 )
899 )
900 options[b'deltabothparents'] = deltabothparents
900 options[b'deltabothparents'] = deltabothparents
901
901
902 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
902 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
903 lazydeltabase = False
903 lazydeltabase = False
904 if lazydelta:
904 if lazydelta:
905 lazydeltabase = ui.configbool(
905 lazydeltabase = ui.configbool(
906 b'storage', b'revlog.reuse-external-delta-parent'
906 b'storage', b'revlog.reuse-external-delta-parent'
907 )
907 )
908 if lazydeltabase is None:
908 if lazydeltabase is None:
909 lazydeltabase = not scmutil.gddeltaconfig(ui)
909 lazydeltabase = not scmutil.gddeltaconfig(ui)
910 options[b'lazydelta'] = lazydelta
910 options[b'lazydelta'] = lazydelta
911 options[b'lazydeltabase'] = lazydeltabase
911 options[b'lazydeltabase'] = lazydeltabase
912
912
913 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
913 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
914 if 0 <= chainspan:
914 if 0 <= chainspan:
915 options[b'maxdeltachainspan'] = chainspan
915 options[b'maxdeltachainspan'] = chainspan
916
916
917 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
917 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
918 if mmapindexthreshold is not None:
918 if mmapindexthreshold is not None:
919 options[b'mmapindexthreshold'] = mmapindexthreshold
919 options[b'mmapindexthreshold'] = mmapindexthreshold
920
920
921 withsparseread = ui.configbool(b'experimental', b'sparse-read')
921 withsparseread = ui.configbool(b'experimental', b'sparse-read')
922 srdensitythres = float(
922 srdensitythres = float(
923 ui.config(b'experimental', b'sparse-read.density-threshold')
923 ui.config(b'experimental', b'sparse-read.density-threshold')
924 )
924 )
925 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
925 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
926 options[b'with-sparse-read'] = withsparseread
926 options[b'with-sparse-read'] = withsparseread
927 options[b'sparse-read-density-threshold'] = srdensitythres
927 options[b'sparse-read-density-threshold'] = srdensitythres
928 options[b'sparse-read-min-gap-size'] = srmingapsize
928 options[b'sparse-read-min-gap-size'] = srmingapsize
929
929
930 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
930 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
931 options[b'sparse-revlog'] = sparserevlog
931 options[b'sparse-revlog'] = sparserevlog
932 if sparserevlog:
932 if sparserevlog:
933 options[b'generaldelta'] = True
933 options[b'generaldelta'] = True
934
934
935 sidedata = SIDEDATA_REQUIREMENT in requirements
935 sidedata = SIDEDATA_REQUIREMENT in requirements
936 options[b'side-data'] = sidedata
936 options[b'side-data'] = sidedata
937
937
938 maxchainlen = None
938 maxchainlen = None
939 if sparserevlog:
939 if sparserevlog:
940 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
940 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
941 # experimental config: format.maxchainlen
941 # experimental config: format.maxchainlen
942 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
942 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
943 if maxchainlen is not None:
943 if maxchainlen is not None:
944 options[b'maxchainlen'] = maxchainlen
944 options[b'maxchainlen'] = maxchainlen
945
945
946 for r in requirements:
946 for r in requirements:
947 # we allow multiple compression engine requirements to co-exist because,
947 # we allow multiple compression engine requirements to co-exist because,
948 # strictly speaking, revlog seems to support mixed compression styles.
948 # strictly speaking, revlog seems to support mixed compression styles.
949 #
949 #
950 # The compression used for new entries will be "the last one"
950 # The compression used for new entries will be "the last one"
951 prefix = r.startswith
951 prefix = r.startswith
952 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
952 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
953 options[b'compengine'] = r.split(b'-', 2)[2]
953 options[b'compengine'] = r.split(b'-', 2)[2]
954
954
955 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
955 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
956 if options[b'zlib.level'] is not None:
956 if options[b'zlib.level'] is not None:
957 if not (0 <= options[b'zlib.level'] <= 9):
957 if not (0 <= options[b'zlib.level'] <= 9):
958 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
958 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
959 raise error.Abort(msg % options[b'zlib.level'])
959 raise error.Abort(msg % options[b'zlib.level'])
960 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
960 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
961 if options[b'zstd.level'] is not None:
961 if options[b'zstd.level'] is not None:
962 if not (0 <= options[b'zstd.level'] <= 22):
962 if not (0 <= options[b'zstd.level'] <= 22):
963 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
963 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
964 raise error.Abort(msg % options[b'zstd.level'])
964 raise error.Abort(msg % options[b'zstd.level'])
965
965
966 if repository.NARROW_REQUIREMENT in requirements:
966 if repository.NARROW_REQUIREMENT in requirements:
967 options[b'enableellipsis'] = True
967 options[b'enableellipsis'] = True
968
968
969 if ui.configbool(b'experimental', b'rust.index'):
969 if ui.configbool(b'experimental', b'rust.index'):
970 options[b'rust.index'] = True
970 options[b'rust.index'] = True
971 if NODEMAP_REQUIREMENT in requirements:
971 if NODEMAP_REQUIREMENT in requirements:
972 options[b'persistent-nodemap'] = True
972 options[b'persistent-nodemap'] = True
973 if ui.configbool(b'storage', b'revlog.nodemap.mmap'):
973 if ui.configbool(b'storage', b'revlog.nodemap.mmap'):
974 options[b'persistent-nodemap.mmap'] = True
974 options[b'persistent-nodemap.mmap'] = True
975 epnm = ui.config(b'storage', b'revlog.nodemap.mode')
975 epnm = ui.config(b'storage', b'revlog.nodemap.mode')
976 options[b'persistent-nodemap.mode'] = epnm
976 options[b'persistent-nodemap.mode'] = epnm
977 if ui.configbool(b'devel', b'persistent-nodemap'):
977 if ui.configbool(b'devel', b'persistent-nodemap'):
978 options[b'devel-force-nodemap'] = True
978 options[b'devel-force-nodemap'] = True
979
979
980 return options
980 return options
981
981
982
982
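
# Editor's illustration (not part of this changeset): how the engine name
# falls out of the requirement string above. The split is capped at two
# dashes, so engine names that themselves contain dashes survive intact.
def _example_compengine_name():
    assert b'revlog-compression-zstd'.split(b'-', 2)[2] == b'zstd'
    assert b'exp-compression-zstd-exp'.split(b'-', 2)[2] == b'zstd-exp'
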
983 def makemain(**kwargs):
983 def makemain(**kwargs):
984 """Produce a type conforming to ``ilocalrepositorymain``."""
984 """Produce a type conforming to ``ilocalrepositorymain``."""
985 return localrepository
985 return localrepository
986
986
987
987
988 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
988 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
989 class revlogfilestorage(object):
989 class revlogfilestorage(object):
990 """File storage when using revlogs."""
990 """File storage when using revlogs."""
991
991
992 def file(self, path):
992 def file(self, path):
993 if path[0] == b'/':
993 if path[0] == b'/':
994 path = path[1:]
994 path = path[1:]
995
995
996 return filelog.filelog(self.svfs, path)
996 return filelog.filelog(self.svfs, path)
997
997
998
998
999 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
999 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1000 class revlognarrowfilestorage(object):
1000 class revlognarrowfilestorage(object):
1001 """File storage when using revlogs and narrow files."""
1001 """File storage when using revlogs and narrow files."""
1002
1002
1003 def file(self, path):
1003 def file(self, path):
1004 if path[0] == b'/':
1004 if path[0] == b'/':
1005 path = path[1:]
1005 path = path[1:]
1006
1006
1007 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
1007 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
1008
1008
1009
1009
1010 def makefilestorage(requirements, features, **kwargs):
1010 def makefilestorage(requirements, features, **kwargs):
1011 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1011 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1012 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1012 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1013 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1013 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1014
1014
1015 if repository.NARROW_REQUIREMENT in requirements:
1015 if repository.NARROW_REQUIREMENT in requirements:
1016 return revlognarrowfilestorage
1016 return revlognarrowfilestorage
1017 else:
1017 else:
1018 return revlogfilestorage
1018 return revlogfilestorage
1019
1019
1020
1020
1021 # List of repository interfaces and factory functions for them. Each
1021 # List of repository interfaces and factory functions for them. Each
1022 # will be called in order during ``makelocalrepository()`` to iteratively
1022 # will be called in order during ``makelocalrepository()`` to iteratively
1023 # derive the final type for a local repository instance. We capture the
1023 # derive the final type for a local repository instance. We capture the
1024 # function as a lambda so we don't hold a reference and the module-level
1024 # function as a lambda so we don't hold a reference and the module-level
1025 # functions can be wrapped.
1025 # functions can be wrapped.
1026 REPO_INTERFACES = [
1026 REPO_INTERFACES = [
1027 (repository.ilocalrepositorymain, lambda: makemain),
1027 (repository.ilocalrepositorymain, lambda: makemain),
1028 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1028 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1029 ]
1029 ]
1030
1030
1031
1031
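
# Editor's sketch (not part of this changeset): the lambda indirection in
# REPO_INTERFACES gives late binding, so an extension that rebinds the
# module-level factory is picked up at call time. The names here are
# illustrative only.
def _example_latebinding():
    def factory():
        return b'original'

    table = [lambda: factory]  # resolved when the lambda is called
    frozen = [factory]  # resolved now; holds the original object

    def factory():  # simulate an extension wrapping the function
        return b'wrapped'

    assert table[0]()() == b'wrapped'
    assert frozen[0]() == b'original'
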
1032 @interfaceutil.implementer(repository.ilocalrepositorymain)
1032 @interfaceutil.implementer(repository.ilocalrepositorymain)
1033 class localrepository(object):
1033 class localrepository(object):
1034 """Main class for representing local repositories.
1034 """Main class for representing local repositories.
1035
1035
1036 All local repositories are instances of this class.
1036 All local repositories are instances of this class.
1037
1037
1038 Constructed on its own, instances of this class are not usable as
1038 Constructed on its own, instances of this class are not usable as
1039 repository objects. To obtain a usable repository object, call
1039 repository objects. To obtain a usable repository object, call
1040 ``hg.repository()``, ``localrepo.instance()``, or
1040 ``hg.repository()``, ``localrepo.instance()``, or
1041 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1041 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1042 ``instance()`` adds support for creating new repositories.
1042 ``instance()`` adds support for creating new repositories.
1043 ``hg.repository()`` adds more extension integration, including calling
1043 ``hg.repository()`` adds more extension integration, including calling
1044 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1044 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1045 used.
1045 used.
1046 """
1046 """
1047
1047
1048 # obsolete experimental requirements:
1048 # obsolete experimental requirements:
1049 # - manifestv2: An experimental new manifest format that allowed
1049 # - manifestv2: An experimental new manifest format that allowed
1050 # for stem compression of long paths. Experiment ended up not
1050 # for stem compression of long paths. Experiment ended up not
1051 # being successful (repository sizes went up due to worse delta
1051 # being successful (repository sizes went up due to worse delta
1052 # chains), and the code was deleted in 4.6.
1052 # chains), and the code was deleted in 4.6.
1053 supportedformats = {
1053 supportedformats = {
1054 b'revlogv1',
1054 b'revlogv1',
1055 b'generaldelta',
1055 b'generaldelta',
1056 b'treemanifest',
1056 b'treemanifest',
1057 COPIESSDC_REQUIREMENT,
1057 COPIESSDC_REQUIREMENT,
1058 REVLOGV2_REQUIREMENT,
1058 REVLOGV2_REQUIREMENT,
1059 SIDEDATA_REQUIREMENT,
1059 SIDEDATA_REQUIREMENT,
1060 SPARSEREVLOG_REQUIREMENT,
1060 SPARSEREVLOG_REQUIREMENT,
1061 NODEMAP_REQUIREMENT,
1061 NODEMAP_REQUIREMENT,
1062 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
1062 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
1063 }
1063 }
1064 _basesupported = supportedformats | {
1064 _basesupported = supportedformats | {
1065 b'store',
1065 b'store',
1066 b'fncache',
1066 b'fncache',
1067 b'shared',
1067 b'shared',
1068 b'relshared',
1068 b'relshared',
1069 b'dotencode',
1069 b'dotencode',
1070 repository.SPARSE_REQUIREMENT,
1070 repository.SPARSE_REQUIREMENT,
1071 b'internal-phase',
1071 repository.INTERNAL_PHASE_REQUIREMENT,
1072 }
1072 }
1073
1073
1074 # list of prefixes for files which can be written without 'wlock'
1074 # list of prefixes for files which can be written without 'wlock'
1075 # Extensions should extend this list when needed
1075 # Extensions should extend this list when needed
1076 _wlockfreeprefix = {
1076 _wlockfreeprefix = {
1077 # We might consider requiring 'wlock' for the next
1077 # We might consider requiring 'wlock' for the next
1078 # two, but pretty much all the existing code assumes
1078 # two, but pretty much all the existing code assumes
1079 # wlock is not needed so we keep them excluded for
1079 # wlock is not needed so we keep them excluded for
1080 # now.
1080 # now.
1081 b'hgrc',
1081 b'hgrc',
1082 b'requires',
1082 b'requires',
1083 # XXX cache is a complicated business; someone
1083 # XXX cache is a complicated business; someone
1084 # should investigate this in depth at some point
1084 # should investigate this in depth at some point
1085 b'cache/',
1085 b'cache/',
1086 # XXX shouldn't be dirstate covered by the wlock?
1086 # XXX shouldn't be dirstate covered by the wlock?
1087 b'dirstate',
1087 b'dirstate',
1088 # XXX bisect was still a bit too messy at the time
1088 # XXX bisect was still a bit too messy at the time
1089 # this changeset was introduced. Someone should fix
1089 # this changeset was introduced. Someone should fix
1090 # the remaining bit and drop this line
1090 # the remaining bit and drop this line
1091 b'bisect.state',
1091 b'bisect.state',
1092 }
1092 }
1093
1093
1094 def __init__(
1094 def __init__(
1095 self,
1095 self,
1096 baseui,
1096 baseui,
1097 ui,
1097 ui,
1098 origroot,
1098 origroot,
1099 wdirvfs,
1099 wdirvfs,
1100 hgvfs,
1100 hgvfs,
1101 requirements,
1101 requirements,
1102 supportedrequirements,
1102 supportedrequirements,
1103 sharedpath,
1103 sharedpath,
1104 store,
1104 store,
1105 cachevfs,
1105 cachevfs,
1106 wcachevfs,
1106 wcachevfs,
1107 features,
1107 features,
1108 intents=None,
1108 intents=None,
1109 ):
1109 ):
1110 """Create a new local repository instance.
1110 """Create a new local repository instance.
1111
1111
1112 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1112 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1113 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1113 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1114 object.
1114 object.
1115
1115
1116 Arguments:
1116 Arguments:
1117
1117
1118 baseui
1118 baseui
1119 ``ui.ui`` instance that ``ui`` argument was based off of.
1119 ``ui.ui`` instance that ``ui`` argument was based off of.
1120
1120
1121 ui
1121 ui
1122 ``ui.ui`` instance for use by the repository.
1122 ``ui.ui`` instance for use by the repository.
1123
1123
1124 origroot
1124 origroot
1125 ``bytes`` path to working directory root of this repository.
1125 ``bytes`` path to working directory root of this repository.
1126
1126
1127 wdirvfs
1127 wdirvfs
1128 ``vfs.vfs`` rooted at the working directory.
1128 ``vfs.vfs`` rooted at the working directory.
1129
1129
1130 hgvfs
1130 hgvfs
1131 ``vfs.vfs`` rooted at .hg/
1131 ``vfs.vfs`` rooted at .hg/
1132
1132
1133 requirements
1133 requirements
1134 ``set`` of bytestrings representing repository opening requirements.
1134 ``set`` of bytestrings representing repository opening requirements.
1135
1135
1136 supportedrequirements
1136 supportedrequirements
1137 ``set`` of bytestrings representing repository requirements that we
1137 ``set`` of bytestrings representing repository requirements that we
1138 know how to open. May be a superset of ``requirements``.
1138 know how to open. May be a superset of ``requirements``.
1139
1139
1140 sharedpath
1140 sharedpath
1141 ``bytes`` defining the path to the storage base directory. Points to a
1141 ``bytes`` defining the path to the storage base directory. Points to a
1142 ``.hg/`` directory somewhere.
1142 ``.hg/`` directory somewhere.
1143
1143
1144 store
1144 store
1145 ``store.basicstore`` (or derived) instance providing access to
1145 ``store.basicstore`` (or derived) instance providing access to
1146 versioned storage.
1146 versioned storage.
1147
1147
1148 cachevfs
1148 cachevfs
1149 ``vfs.vfs`` used for cache files.
1149 ``vfs.vfs`` used for cache files.
1150
1150
1151 wcachevfs
1151 wcachevfs
1152 ``vfs.vfs`` used for cache files related to the working copy.
1152 ``vfs.vfs`` used for cache files related to the working copy.
1153
1153
1154 features
1154 features
1155 ``set`` of bytestrings defining features/capabilities of this
1155 ``set`` of bytestrings defining features/capabilities of this
1156 instance.
1156 instance.
1157
1157
1158 intents
1158 intents
1159 ``set`` of system strings indicating what this repo will be used
1159 ``set`` of system strings indicating what this repo will be used
1160 for.
1160 for.
1161 """
1161 """
1162 self.baseui = baseui
1162 self.baseui = baseui
1163 self.ui = ui
1163 self.ui = ui
1164 self.origroot = origroot
1164 self.origroot = origroot
1165 # vfs rooted at working directory.
1165 # vfs rooted at working directory.
1166 self.wvfs = wdirvfs
1166 self.wvfs = wdirvfs
1167 self.root = wdirvfs.base
1167 self.root = wdirvfs.base
1168 # vfs rooted at .hg/. Used to access most non-store paths.
1168 # vfs rooted at .hg/. Used to access most non-store paths.
1169 self.vfs = hgvfs
1169 self.vfs = hgvfs
1170 self.path = hgvfs.base
1170 self.path = hgvfs.base
1171 self.requirements = requirements
1171 self.requirements = requirements
1172 self.supported = supportedrequirements
1172 self.supported = supportedrequirements
1173 self.sharedpath = sharedpath
1173 self.sharedpath = sharedpath
1174 self.store = store
1174 self.store = store
1175 self.cachevfs = cachevfs
1175 self.cachevfs = cachevfs
1176 self.wcachevfs = wcachevfs
1176 self.wcachevfs = wcachevfs
1177 self.features = features
1177 self.features = features
1178
1178
1179 self.filtername = None
1179 self.filtername = None
1180
1180
1181 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1181 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1182 b'devel', b'check-locks'
1182 b'devel', b'check-locks'
1183 ):
1183 ):
1184 self.vfs.audit = self._getvfsward(self.vfs.audit)
1184 self.vfs.audit = self._getvfsward(self.vfs.audit)
1185 # A list of callbacks to shape the phase if no data were found.
1185 # A list of callbacks to shape the phase if no data were found.
1186 # Callbacks are in the form: func(repo, roots) --> processed roots.
1186 # Callbacks are in the form: func(repo, roots) --> processed roots.
1187 # This list is to be filled by extensions during repo setup
1187 # This list is to be filled by extensions during repo setup
1188 self._phasedefaults = []
1188 self._phasedefaults = []
1189
1189
1190 color.setup(self.ui)
1190 color.setup(self.ui)
1191
1191
1192 self.spath = self.store.path
1192 self.spath = self.store.path
1193 self.svfs = self.store.vfs
1193 self.svfs = self.store.vfs
1194 self.sjoin = self.store.join
1194 self.sjoin = self.store.join
1195 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1195 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1196 b'devel', b'check-locks'
1196 b'devel', b'check-locks'
1197 ):
1197 ):
1198 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1198 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1199 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1199 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1200 else: # standard vfs
1200 else: # standard vfs
1201 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1201 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1202
1202
1203 self._dirstatevalidatewarned = False
1203 self._dirstatevalidatewarned = False
1204
1204
1205 self._branchcaches = branchmap.BranchMapCache()
1205 self._branchcaches = branchmap.BranchMapCache()
1206 self._revbranchcache = None
1206 self._revbranchcache = None
1207 self._filterpats = {}
1207 self._filterpats = {}
1208 self._datafilters = {}
1208 self._datafilters = {}
1209 self._transref = self._lockref = self._wlockref = None
1209 self._transref = self._lockref = self._wlockref = None
1210
1210
1211 # A cache for various files under .hg/ that tracks file changes,
1211 # A cache for various files under .hg/ that tracks file changes,
1212 # (used by the filecache decorator)
1212 # (used by the filecache decorator)
1213 #
1213 #
1214 # Maps a property name to its util.filecacheentry
1214 # Maps a property name to its util.filecacheentry
1215 self._filecache = {}
1215 self._filecache = {}
1216
1216
1217 # hold sets of revisions to be filtered
1217 # hold sets of revisions to be filtered
1218 # should be cleared when something might have changed the filter value:
1218 # should be cleared when something might have changed the filter value:
1219 # - new changesets,
1219 # - new changesets,
1220 # - phase change,
1220 # - phase change,
1221 # - new obsolescence marker,
1221 # - new obsolescence marker,
1222 # - working directory parent change,
1222 # - working directory parent change,
1223 # - bookmark changes
1223 # - bookmark changes
1224 self.filteredrevcache = {}
1224 self.filteredrevcache = {}
1225
1225
1226 # post-dirstate-status hooks
1226 # post-dirstate-status hooks
1227 self._postdsstatus = []
1227 self._postdsstatus = []
1228
1228
1229 # generic mapping between names and nodes
1229 # generic mapping between names and nodes
1230 self.names = namespaces.namespaces()
1230 self.names = namespaces.namespaces()
1231
1231
1232 # Key to signature value.
1232 # Key to signature value.
1233 self._sparsesignaturecache = {}
1233 self._sparsesignaturecache = {}
1234 # Signature to cached matcher instance.
1234 # Signature to cached matcher instance.
1235 self._sparsematchercache = {}
1235 self._sparsematchercache = {}
1236
1236
1237 self._extrafilterid = repoview.extrafilter(ui)
1237 self._extrafilterid = repoview.extrafilter(ui)
1238
1238
1239 self.filecopiesmode = None
1239 self.filecopiesmode = None
1240 if COPIESSDC_REQUIREMENT in self.requirements:
1240 if COPIESSDC_REQUIREMENT in self.requirements:
1241 self.filecopiesmode = b'changeset-sidedata'
1241 self.filecopiesmode = b'changeset-sidedata'
1242
1242
1243 def _getvfsward(self, origfunc):
1243 def _getvfsward(self, origfunc):
1244 """build a ward for self.vfs"""
1244 """build a ward for self.vfs"""
1245 rref = weakref.ref(self)
1245 rref = weakref.ref(self)
1246
1246
1247 def checkvfs(path, mode=None):
1247 def checkvfs(path, mode=None):
1248 ret = origfunc(path, mode=mode)
1248 ret = origfunc(path, mode=mode)
1249 repo = rref()
1249 repo = rref()
1250 if (
1250 if (
1251 repo is None
1251 repo is None
1252 or not util.safehasattr(repo, b'_wlockref')
1252 or not util.safehasattr(repo, b'_wlockref')
1253 or not util.safehasattr(repo, b'_lockref')
1253 or not util.safehasattr(repo, b'_lockref')
1254 ):
1254 ):
1255 return
1255 return
1256 if mode in (None, b'r', b'rb'):
1256 if mode in (None, b'r', b'rb'):
1257 return
1257 return
1258 if path.startswith(repo.path):
1258 if path.startswith(repo.path):
1259 # truncate name relative to the repository (.hg)
1259 # truncate name relative to the repository (.hg)
1260 path = path[len(repo.path) + 1 :]
1260 path = path[len(repo.path) + 1 :]
1261 if path.startswith(b'cache/'):
1261 if path.startswith(b'cache/'):
1262 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1262 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1263 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1263 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1264 # path prefixes covered by 'lock'
1264 # path prefixes covered by 'lock'
1265 vfs_path_prefixes = (b'journal.', b'undo.', b'strip-backup/')
1265 vfs_path_prefixes = (b'journal.', b'undo.', b'strip-backup/')
1266 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1266 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1267 if repo._currentlock(repo._lockref) is None:
1267 if repo._currentlock(repo._lockref) is None:
1268 repo.ui.develwarn(
1268 repo.ui.develwarn(
1269 b'write with no lock: "%s"' % path,
1269 b'write with no lock: "%s"' % path,
1270 stacklevel=3,
1270 stacklevel=3,
1271 config=b'check-locks',
1271 config=b'check-locks',
1272 )
1272 )
1273 elif repo._currentlock(repo._wlockref) is None:
1273 elif repo._currentlock(repo._wlockref) is None:
1274 # rest of vfs files are covered by 'wlock'
1274 # rest of vfs files are covered by 'wlock'
1275 #
1275 #
1276 # exclude special files
1276 # exclude special files
1277 for prefix in self._wlockfreeprefix:
1277 for prefix in self._wlockfreeprefix:
1278 if path.startswith(prefix):
1278 if path.startswith(prefix):
1279 return
1279 return
1280 repo.ui.develwarn(
1280 repo.ui.develwarn(
1281 b'write with no wlock: "%s"' % path,
1281 b'write with no wlock: "%s"' % path,
1282 stacklevel=3,
1282 stacklevel=3,
1283 config=b'check-locks',
1283 config=b'check-locks',
1284 )
1284 )
1285 return ret
1285 return ret
1286
1286
1287 return checkvfs
1287 return checkvfs
1288
1288
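
# Editor's sketch (not part of this changeset): the weakref pattern used
# by the ward above. The wrapper must not keep the repo alive, so it
# holds a weak reference and degrades to a no-op once the repo has been
# garbage collected (the immediate collection below assumes CPython's
# reference counting).
def _example_weakward():
    import weakref

    class fakerepo(object):
        pass

    repo = fakerepo()
    rref = weakref.ref(repo)

    def ward(path):
        owner = rref()  # None once the repo is gone
        if owner is None:
            return b'no-op'
        return b'checked'

    assert ward(b'journal.x') == b'checked'
    del repo
    assert ward(b'journal.x') == b'no-op'
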
1289 def _getsvfsward(self, origfunc):
1289 def _getsvfsward(self, origfunc):
1290 """build a ward for self.svfs"""
1290 """build a ward for self.svfs"""
1291 rref = weakref.ref(self)
1291 rref = weakref.ref(self)
1292
1292
1293 def checksvfs(path, mode=None):
1293 def checksvfs(path, mode=None):
1294 ret = origfunc(path, mode=mode)
1294 ret = origfunc(path, mode=mode)
1295 repo = rref()
1295 repo = rref()
1296 if repo is None or not util.safehasattr(repo, b'_lockref'):
1296 if repo is None or not util.safehasattr(repo, b'_lockref'):
1297 return
1297 return
1298 if mode in (None, b'r', b'rb'):
1298 if mode in (None, b'r', b'rb'):
1299 return
1299 return
1300 if path.startswith(repo.sharedpath):
1300 if path.startswith(repo.sharedpath):
1301 # truncate name relative to the repository (.hg)
1301 # truncate name relative to the repository (.hg)
1302 path = path[len(repo.sharedpath) + 1 :]
1302 path = path[len(repo.sharedpath) + 1 :]
1303 if repo._currentlock(repo._lockref) is None:
1303 if repo._currentlock(repo._lockref) is None:
1304 repo.ui.develwarn(
1304 repo.ui.develwarn(
1305 b'write with no lock: "%s"' % path, stacklevel=4
1305 b'write with no lock: "%s"' % path, stacklevel=4
1306 )
1306 )
1307 return ret
1307 return ret
1308
1308
1309 return checksvfs
1309 return checksvfs
1310
1310
1311 def close(self):
1311 def close(self):
1312 self._writecaches()
1312 self._writecaches()
1313
1313
1314 def _writecaches(self):
1314 def _writecaches(self):
1315 if self._revbranchcache:
1315 if self._revbranchcache:
1316 self._revbranchcache.write()
1316 self._revbranchcache.write()
1317
1317
1318 def _restrictcapabilities(self, caps):
1318 def _restrictcapabilities(self, caps):
1319 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1319 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1320 caps = set(caps)
1320 caps = set(caps)
1321 capsblob = bundle2.encodecaps(
1321 capsblob = bundle2.encodecaps(
1322 bundle2.getrepocaps(self, role=b'client')
1322 bundle2.getrepocaps(self, role=b'client')
1323 )
1323 )
1324 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1324 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1325 return caps
1325 return caps
1326
1326
1327 # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
1327 # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
1328 # self -> auditor -> self._checknested -> self
1328 # self -> auditor -> self._checknested -> self
1329
1329
1330 @property
1330 @property
1331 def auditor(self):
1331 def auditor(self):
1332 # This is only used by context.workingctx.match in order to
1332 # This is only used by context.workingctx.match in order to
1333 # detect files in subrepos.
1333 # detect files in subrepos.
1334 return pathutil.pathauditor(self.root, callback=self._checknested)
1334 return pathutil.pathauditor(self.root, callback=self._checknested)
1335
1335
1336 @property
1336 @property
1337 def nofsauditor(self):
1337 def nofsauditor(self):
1338 # This is only used by context.basectx.match in order to detect
1338 # This is only used by context.basectx.match in order to detect
1339 # files in subrepos.
1339 # files in subrepos.
1340 return pathutil.pathauditor(
1340 return pathutil.pathauditor(
1341 self.root, callback=self._checknested, realfs=False, cached=True
1341 self.root, callback=self._checknested, realfs=False, cached=True
1342 )
1342 )
1343
1343
1344 def _checknested(self, path):
1344 def _checknested(self, path):
1345 """Determine if path is a legal nested repository."""
1345 """Determine if path is a legal nested repository."""
1346 if not path.startswith(self.root):
1346 if not path.startswith(self.root):
1347 return False
1347 return False
1348 subpath = path[len(self.root) + 1 :]
1348 subpath = path[len(self.root) + 1 :]
1349 normsubpath = util.pconvert(subpath)
1349 normsubpath = util.pconvert(subpath)
1350
1350
1351 # XXX: Checking against the current working copy is wrong in
1351 # XXX: Checking against the current working copy is wrong in
1352 # the sense that it can reject things like
1352 # the sense that it can reject things like
1353 #
1353 #
1354 # $ hg cat -r 10 sub/x.txt
1354 # $ hg cat -r 10 sub/x.txt
1355 #
1355 #
1356 # if sub/ is no longer a subrepository in the working copy
1356 # if sub/ is no longer a subrepository in the working copy
1357 # parent revision.
1357 # parent revision.
1358 #
1358 #
1359 # However, it can of course also allow things that would have
1359 # However, it can of course also allow things that would have
1360 # been rejected before, such as the above cat command if sub/
1360 # been rejected before, such as the above cat command if sub/
1361 # is a subrepository now, but was a normal directory before.
1361 # is a subrepository now, but was a normal directory before.
1362 # The old path auditor would have rejected by mistake since it
1362 # The old path auditor would have rejected by mistake since it
1363 # panics when it sees sub/.hg/.
1363 # panics when it sees sub/.hg/.
1364 #
1364 #
1365 # All in all, checking against the working copy seems sensible
1365 # All in all, checking against the working copy seems sensible
1366 # since we want to prevent access to nested repositories on
1366 # since we want to prevent access to nested repositories on
1367 # the filesystem *now*.
1367 # the filesystem *now*.
1368 ctx = self[None]
1368 ctx = self[None]
1369 parts = util.splitpath(subpath)
1369 parts = util.splitpath(subpath)
1370 while parts:
1370 while parts:
1371 prefix = b'/'.join(parts)
1371 prefix = b'/'.join(parts)
1372 if prefix in ctx.substate:
1372 if prefix in ctx.substate:
1373 if prefix == normsubpath:
1373 if prefix == normsubpath:
1374 return True
1374 return True
1375 else:
1375 else:
1376 sub = ctx.sub(prefix)
1376 sub = ctx.sub(prefix)
1377 return sub.checknested(subpath[len(prefix) + 1 :])
1377 return sub.checknested(subpath[len(prefix) + 1 :])
1378 else:
1378 else:
1379 parts.pop()
1379 parts.pop()
1380 return False
1380 return False
1381
1381
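
# Editor's worked example (not part of this changeset) of the prefix walk
# above, with a hypothetical layout: for subpath b'sub/dir/f.txt' where
# only b'sub' has a substate entry, the loop probes progressively shorter
# prefixes until it hits b'sub', then delegates the remainder to that
# subrepo's checknested().
def _example_prefixwalk():
    parts = [b'sub', b'dir', b'f.txt']  # util.splitpath()-style pieces
    substate = {b'sub'}
    probed = []
    while parts:
        prefix = b'/'.join(parts)
        probed.append(prefix)
        if prefix in substate:
            break
        parts.pop()
    assert probed == [b'sub/dir/f.txt', b'sub/dir', b'sub']
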
1382 def peer(self):
1382 def peer(self):
1383 return localpeer(self) # not cached to avoid reference cycle
1383 return localpeer(self) # not cached to avoid reference cycle
1384
1384
1385 def unfiltered(self):
1385 def unfiltered(self):
1386 """Return unfiltered version of the repository
1386 """Return unfiltered version of the repository
1387
1387
1388 Intended to be overwritten by filtered repo."""
1388 Intended to be overwritten by filtered repo."""
1389 return self
1389 return self
1390
1390
1391 def filtered(self, name, visibilityexceptions=None):
1391 def filtered(self, name, visibilityexceptions=None):
1392 """Return a filtered version of a repository
1392 """Return a filtered version of a repository
1393
1393
1394 The `name` parameter is the identifier of the requested view. This
1394 The `name` parameter is the identifier of the requested view. This
1395 will return a repoview object set "exactly" to the specified view.
1395 will return a repoview object set "exactly" to the specified view.
1396
1396
1397 This function does not apply recursive filtering to a repository. For
1397 This function does not apply recursive filtering to a repository. For
1398 example calling `repo.filtered("served")` will return a repoview using
1398 example calling `repo.filtered("served")` will return a repoview using
1399 the "served" view, regardless of the initial view used by `repo`.
1399 the "served" view, regardless of the initial view used by `repo`.
1400
1400
1401 In other words, there is always only one level of `repoview` "filtering".
1401 In other words, there is always only one level of `repoview` "filtering".
1402 """
1402 """
1403 if self._extrafilterid is not None and b'%' not in name:
1403 if self._extrafilterid is not None and b'%' not in name:
1404 name = name + b'%' + self._extrafilterid
1404 name = name + b'%' + self._extrafilterid
1405
1405
1406 cls = repoview.newtype(self.unfiltered().__class__)
1406 cls = repoview.newtype(self.unfiltered().__class__)
1407 return cls(self, name, visibilityexceptions)
1407 return cls(self, name, visibilityexceptions)
1408
1408
1409 @mixedrepostorecache(
1409 @mixedrepostorecache(
1410 (b'bookmarks', b'plain'),
1410 (b'bookmarks', b'plain'),
1411 (b'bookmarks.current', b'plain'),
1411 (b'bookmarks.current', b'plain'),
1412 (b'bookmarks', b''),
1412 (b'bookmarks', b''),
1413 (b'00changelog.i', b''),
1413 (b'00changelog.i', b''),
1414 )
1414 )
1415 def _bookmarks(self):
1415 def _bookmarks(self):
1416 # Since the multiple files involved in the transaction cannot be
1416 # Since the multiple files involved in the transaction cannot be
1417 # written atomically (with current repository format), there is a race
1417 # written atomically (with current repository format), there is a race
1418 # condition here.
1418 # condition here.
1419 #
1419 #
1420 # 1) changelog content A is read
1420 # 1) changelog content A is read
1421 # 2) outside transaction update changelog to content B
1421 # 2) outside transaction update changelog to content B
1422 # 3) outside transaction update bookmark file referring to content B
1422 # 3) outside transaction update bookmark file referring to content B
1423 # 4) bookmarks file content is read and filtered against changelog-A
1423 # 4) bookmarks file content is read and filtered against changelog-A
1424 #
1424 #
1425 # When this happens, bookmarks against nodes missing from A are dropped.
1425 # When this happens, bookmarks against nodes missing from A are dropped.
1426 #
1426 #
1427 # Having this happen during read is not great, but it becomes worse
1427 # Having this happen during read is not great, but it becomes worse
1428 # when it happens during write, because the bookmarks to the "unknown"
1428 # when it happens during write, because the bookmarks to the "unknown"
1429 # nodes will be dropped for good. However, writes happen within locks.
1429 # nodes will be dropped for good. However, writes happen within locks.
1430 # This locking makes it possible to have a race free consistent read.
1430 # This locking makes it possible to have a race free consistent read.
1431 # For this purpose, data read from disk before locking are
1431 # For this purpose, data read from disk before locking are
1432 # "invalidated" right after the locks are taken. These invalidations are
1432 # "invalidated" right after the locks are taken. These invalidations are
1433 # "light"; the `filecache` mechanism keeps the data in memory and will
1433 # "light"; the `filecache` mechanism keeps the data in memory and will
1434 # reuse them if the underlying files did not change. Not parsing the
1434 # reuse them if the underlying files did not change. Not parsing the
1435 # same data multiple times helps performance.
1435 # same data multiple times helps performance.
1436 #
1436 #
1437 # Unfortunately, in the case described above, the files tracked by the
1437 # Unfortunately, in the case described above, the files tracked by the
1438 # bookmarks file cache might not have changed, but the in-memory
1438 # bookmarks file cache might not have changed, but the in-memory
1439 # content is still "wrong" because we used an older changelog content
1439 # content is still "wrong" because we used an older changelog content
1440 # to process the on-disk data. So after locking, the changelog would be
1440 # to process the on-disk data. So after locking, the changelog would be
1441 # refreshed but `_bookmarks` would be preserved.
1441 # refreshed but `_bookmarks` would be preserved.
1442 # Adding `00changelog.i` to the list of tracked files is not
1442 # Adding `00changelog.i` to the list of tracked files is not
1443 # enough, because at the time we build the content for `_bookmarks` in
1443 # enough, because at the time we build the content for `_bookmarks` in
1444 # (4), the changelog file has already diverged from the content used
1444 # (4), the changelog file has already diverged from the content used
1445 # for loading `changelog` in (1)
1445 # for loading `changelog` in (1)
1446 #
1446 #
1447 # To prevent the issue, we force the changelog to be explicitly
1447 # To prevent the issue, we force the changelog to be explicitly
1448 # reloaded while computing `_bookmarks`. The data race can still happen
1448 # reloaded while computing `_bookmarks`. The data race can still happen
1449 # without the lock (with a narrower window), but it would no longer go
1449 # without the lock (with a narrower window), but it would no longer go
1450 # undetected during the lock time refresh.
1450 # undetected during the lock time refresh.
1451 #
1451 #
1452 # The new schedule is as follows:
1452 # The new schedule is as follows:
1453 #
1453 #
1454 # 1) filecache logic detect that `_bookmarks` needs to be computed
1454 # 1) filecache logic detect that `_bookmarks` needs to be computed
1455 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1455 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1456 # 3) We force `changelog` filecache to be tested
1456 # 3) We force `changelog` filecache to be tested
1457 # 4) cachestat for `changelog` are captured (for changelog)
1457 # 4) cachestat for `changelog` are captured (for changelog)
1458 # 5) `_bookmarks` is computed and cached
1458 # 5) `_bookmarks` is computed and cached
1459 #
1459 #
1460 # The step in (3) ensures we have a changelog at least as recent as the
1460 # The step in (3) ensures we have a changelog at least as recent as the
1461 # cache stat computed in (1). As a result at locking time:
1461 # cache stat computed in (1). As a result at locking time:
1462 # * if the changelog did not change since (1) -> we can reuse the data
1462 # * if the changelog did not change since (1) -> we can reuse the data
1463 # * otherwise -> the bookmarks get refreshed.
1463 # * otherwise -> the bookmarks get refreshed.
1464 self._refreshchangelog()
1464 self._refreshchangelog()
1465 return bookmarks.bmstore(self)
1465 return bookmarks.bmstore(self)
1466
1466
1467 def _refreshchangelog(self):
1467 def _refreshchangelog(self):
1468 """make sure the in memory changelog match the on-disk one"""
1468 """make sure the in memory changelog match the on-disk one"""
1469 if 'changelog' in vars(self) and self.currenttransaction() is None:
1469 if 'changelog' in vars(self) and self.currenttransaction() is None:
1470 del self.changelog
1470 del self.changelog
1471
1471
1472 @property
1472 @property
1473 def _activebookmark(self):
1473 def _activebookmark(self):
1474 return self._bookmarks.active
1474 return self._bookmarks.active
1475
1475
1476 # _phasesets depend on changelog. what we need is to call
1476 # _phasesets depend on changelog. what we need is to call
1477 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1477 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1478 # can't be easily expressed in filecache mechanism.
1478 # can't be easily expressed in filecache mechanism.
1479 @storecache(b'phaseroots', b'00changelog.i')
1479 @storecache(b'phaseroots', b'00changelog.i')
1480 def _phasecache(self):
1480 def _phasecache(self):
1481 return phases.phasecache(self, self._phasedefaults)
1481 return phases.phasecache(self, self._phasedefaults)
1482
1482
1483 @storecache(b'obsstore')
1483 @storecache(b'obsstore')
1484 def obsstore(self):
1484 def obsstore(self):
1485 return obsolete.makestore(self.ui, self)
1485 return obsolete.makestore(self.ui, self)
1486
1486
1487 @storecache(b'00changelog.i')
1487 @storecache(b'00changelog.i')
1488 def changelog(self):
1488 def changelog(self):
1489 # load dirstate before changelog to avoid a race; see issue6303
1489 # load dirstate before changelog to avoid a race; see issue6303
1490 self.dirstate.prefetch_parents()
1490 self.dirstate.prefetch_parents()
1491 return self.store.changelog(txnutil.mayhavepending(self.root))
1491 return self.store.changelog(txnutil.mayhavepending(self.root))
1492
1492
1493 @storecache(b'00manifest.i')
1493 @storecache(b'00manifest.i')
1494 def manifestlog(self):
1494 def manifestlog(self):
1495 return self.store.manifestlog(self, self._storenarrowmatch)
1495 return self.store.manifestlog(self, self._storenarrowmatch)
1496
1496
1497 @repofilecache(b'dirstate')
1497 @repofilecache(b'dirstate')
1498 def dirstate(self):
1498 def dirstate(self):
1499 return self._makedirstate()
1499 return self._makedirstate()
1500
1500
1501 def _makedirstate(self):
1501 def _makedirstate(self):
1502 """Extension point for wrapping the dirstate per-repo."""
1502 """Extension point for wrapping the dirstate per-repo."""
1503 sparsematchfn = lambda: sparse.matcher(self)
1503 sparsematchfn = lambda: sparse.matcher(self)
1504
1504
1505 return dirstate.dirstate(
1505 return dirstate.dirstate(
1506 self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
1506 self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
1507 )
1507 )
1508
1508
1509 def _dirstatevalidate(self, node):
1509 def _dirstatevalidate(self, node):
1510 try:
1510 try:
1511 self.changelog.rev(node)
1511 self.changelog.rev(node)
1512 return node
1512 return node
1513 except error.LookupError:
1513 except error.LookupError:
1514 if not self._dirstatevalidatewarned:
1514 if not self._dirstatevalidatewarned:
1515 self._dirstatevalidatewarned = True
1515 self._dirstatevalidatewarned = True
1516 self.ui.warn(
1516 self.ui.warn(
1517 _(b"warning: ignoring unknown working parent %s!\n")
1517 _(b"warning: ignoring unknown working parent %s!\n")
1518 % short(node)
1518 % short(node)
1519 )
1519 )
1520 return nullid
1520 return nullid
1521
1521
1522 @storecache(narrowspec.FILENAME)
1522 @storecache(narrowspec.FILENAME)
1523 def narrowpats(self):
1523 def narrowpats(self):
1524 """matcher patterns for this repository's narrowspec
1524 """matcher patterns for this repository's narrowspec
1525
1525
1526 A tuple of (includes, excludes).
1526 A tuple of (includes, excludes).
1527 """
1527 """
1528 return narrowspec.load(self)
1528 return narrowspec.load(self)
1529
1529
1530 @storecache(narrowspec.FILENAME)
1530 @storecache(narrowspec.FILENAME)
1531 def _storenarrowmatch(self):
1531 def _storenarrowmatch(self):
1532 if repository.NARROW_REQUIREMENT not in self.requirements:
1532 if repository.NARROW_REQUIREMENT not in self.requirements:
1533 return matchmod.always()
1533 return matchmod.always()
1534 include, exclude = self.narrowpats
1534 include, exclude = self.narrowpats
1535 return narrowspec.match(self.root, include=include, exclude=exclude)
1535 return narrowspec.match(self.root, include=include, exclude=exclude)
1536
1536
1537 @storecache(narrowspec.FILENAME)
1537 @storecache(narrowspec.FILENAME)
1538 def _narrowmatch(self):
1538 def _narrowmatch(self):
1539 if repository.NARROW_REQUIREMENT not in self.requirements:
1539 if repository.NARROW_REQUIREMENT not in self.requirements:
1540 return matchmod.always()
1540 return matchmod.always()
1541 narrowspec.checkworkingcopynarrowspec(self)
1541 narrowspec.checkworkingcopynarrowspec(self)
1542 include, exclude = self.narrowpats
1542 include, exclude = self.narrowpats
1543 return narrowspec.match(self.root, include=include, exclude=exclude)
1543 return narrowspec.match(self.root, include=include, exclude=exclude)
1544
1544
1545 def narrowmatch(self, match=None, includeexact=False):
1545 def narrowmatch(self, match=None, includeexact=False):
1546 """matcher corresponding the the repo's narrowspec
1546 """matcher corresponding the the repo's narrowspec
1547
1547
1548 If `match` is given, then that will be intersected with the narrow
1548 If `match` is given, then that will be intersected with the narrow
1549 matcher.
1549 matcher.
1550
1550
1551 If `includeexact` is True, then any exact matches from `match` will
1551 If `includeexact` is True, then any exact matches from `match` will
1552 be included even if they're outside the narrowspec.
1552 be included even if they're outside the narrowspec.
1553 """
1553 """
1554 if match:
1554 if match:
1555 if includeexact and not self._narrowmatch.always():
1555 if includeexact and not self._narrowmatch.always():
1556 # do not exclude explicitly-specified paths so that they can
1556 # do not exclude explicitly-specified paths so that they can
1557 # be warned later on
1557 # be warned later on
1558 em = matchmod.exact(match.files())
1558 em = matchmod.exact(match.files())
1559 nm = matchmod.unionmatcher([self._narrowmatch, em])
1559 nm = matchmod.unionmatcher([self._narrowmatch, em])
1560 return matchmod.intersectmatchers(match, nm)
1560 return matchmod.intersectmatchers(match, nm)
1561 return matchmod.intersectmatchers(match, self._narrowmatch)
1561 return matchmod.intersectmatchers(match, self._narrowmatch)
1562 return self._narrowmatch
1562 return self._narrowmatch
1563
1563
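    # Illustrative use of narrowmatch() (editor's sketch, not part of the
    # original source; `repo` and the pattern are hypothetical):
    #
    #   m = matchmod.match(repo.root, b'', [b'glob:src/**'])
    #   nm = repo.narrowmatch(m)          # intersect with the narrowspec
    #   nm(b'src/app.py')                 # True only if both matchers agree
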
    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, nullid),
            nullrev: (nullrev, nullid),
            nullid: (nullrev, nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast path access to the working copy parents
        # however, only do it for filters that ensure wc is visible.
        quick = {}
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                # skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != nullid:
            quick[b'.'] = quick[p1node]
        return quick

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains the symbols we can recognise right away without
        further processing.
        """
        mapping = self._quick_access_changeid_null
        if self.filtername in repoview.filter_has_wc:
            mapping = mapping.copy()
            mapping.update(self._quick_access_changeid_wc)
        return mapping

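    # Fast-path sketch (editor's note; `repo` is hypothetical): lookups such
    # as repo[b'null'] or repo[nullrev] resolve through this dict, and on
    # filters listed in repoview.filter_has_wc so do the working copy
    # parents, skipping the general machinery in __getitem__:
    #
    #   pair = repo._quick_access_changeid.get(b'.')
    #   if pair is not None:
    #       rev, node = pair              # (integer revision, binary node)
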
    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in pycompat.xrange(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)

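    # Accepted __getitem__ keys, summarized (editor's sketch; `repo` and the
    # node values are hypothetical):
    #
    #   repo[None]           -> workingctx for the working directory
    #   repo[5]              -> changectx for revision 5
    #   repo[b'tip']         -> changectx for the tip revision
    #   repo[node20]         -> changectx for a 20-byte binary node
    #   repo[b'0123...cdef'] -> changectx for a 40-byte hex node
    #   repo[0:3]            -> list of changectx, filtered revs skipped
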
    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node
        is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)

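    # %-formatting sketch for revs() (editor's example; the arguments are
    # hypothetical). revsetlang.formatspec escapes each value, so caller
    # input cannot inject revset syntax:
    #
    #   repo.revs(b'heads(%ld)', [1, 2, 3])    # %ld escapes a list of revs
    #   repo.revs(b'branch(%s)', b'default')   # %s escapes a bytestring
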
    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

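    # anyrevs() sketch (editor's example; the alias and its definition are
    # hypothetical):
    #
    #   revs = repo.anyrevs(
    #       [b'mine()'], user=True, localalias={b'mine': b'author(alice)'}
    #   )
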
    def url(self):
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

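    # hook() sketch (editor's example; the hook name and argument are
    # hypothetical). Keyword arguments are exposed to shell hooks as HG_*
    # environment variables and to in-process Python hooks as kwargs:
    #
    #   repo.hook(b'myhook', throw=False, node=hex(node))
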
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in pycompat.iteritems(tags):
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags.  Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use?  Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type?  Should there
        # be one tagtype for all such "virtual" tags?  Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts.  Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in pycompat.iteritems(alltags):
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value
            for (name, value) in pycompat.iteritems(tagtypes)
        }
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in pycompat.iteritems(self.tags()):
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in pycompat.iteritems(self._tagscache.tags):
                nodetagscache.setdefault(n, []).append(t)
            for tags in pycompat.itervalues(nodetagscache):
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in pycompat.iteritems(self._datafilters):
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

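    # Configuration sketch for _loadfilter() (editor's note; the pattern and
    # commands are hypothetical hgrc content):
    #
    #   [encode]
    #   **.txt = dos2unix
    #
    #   [decode]
    #   **.txt = unix2dos
    #
    # A command of '!' disables the pattern; names registered through
    # adddatafilter() are matched by command prefix before falling back to a
    # shell command filter.
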
    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

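    # wwrite() flag handling, summarized (editor's sketch; filenames and data
    # are hypothetical):
    #
    #   repo.wwrite(b'a.txt', data, b'')     # regular file, exec bit cleared
    #   repo.wwrite(b'a.txt', data, b'x')    # regular file, exec bit set
    #   repo.wwrite(b'ln', b'target', b'l')  # symlink pointing at b'target'
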
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs}  # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup application, but that fails
        # to cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current focus
        # is on the behavior of the feature so we disable it by default. The
        # flag will be removed when we are happy with the performance impact.
        #
        # Once this feature is no longer experimental move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        # ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
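        #
        # A txnclose hook could consume the file along these lines (editor's
        # sketch, not part of the original source; the hook function is
        # hypothetical and the kwargs naming is an assumption):
        #
        #   def tagmovedhook(ui, repo, **kwargs):
        #       if kwargs.get('tag_moved') != b'1':
        #           return
        #       with repo.vfs(b'changes/tags.changes') as fp:
        #           for line in fp:
        #               action, node, name = line.rstrip(b'\n').split(b' ', 2)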
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here; as we do it only once,
                # building sets would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition, we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args)
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args)
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, b'journal.narrowspec')
                narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, b'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            aftertrans(renames),
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        tr.hookargs[b'changes'] = tr.changes
        # note: writing the fncache only during finalize means that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happens during the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(phases.preparehookargs(node, old, new))
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args)
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            reporef()._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

    def _journalfiles(self):
        return (
            (self.svfs, b'journal'),
            (self.svfs, b'journal.narrowspec'),
            (self.vfs, b'journal.narrowspec.dirstate'),
            (self.vfs, b'journal.dirstate'),
            (self.vfs, b'journal.branch'),
            (self.vfs, b'journal.desc'),
            (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
            (self.svfs, b'journal.phaseroots'),
        )

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

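    # Naming sketch (editor's note): undofiles() pairs each journal file from
    # _journalfiles() with its post-transaction counterpart via undoname(),
    # e.g. (hypothetical):
    #
    #   undoname(b'journal.dirstate') -> b'undo.dirstate'
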
2365 @unfilteredmethod
2365 @unfilteredmethod
2366 def _writejournal(self, desc):
2366 def _writejournal(self, desc):
2367 self.dirstate.savebackup(None, b'journal.dirstate')
2367 self.dirstate.savebackup(None, b'journal.dirstate')
2368 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2368 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2369 narrowspec.savebackup(self, b'journal.narrowspec')
2369 narrowspec.savebackup(self, b'journal.narrowspec')
2370 self.vfs.write(
2370 self.vfs.write(
2371 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2371 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2372 )
2372 )
2373 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2373 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2374 bookmarksvfs = bookmarks.bookmarksvfs(self)
2374 bookmarksvfs = bookmarks.bookmarksvfs(self)
2375 bookmarksvfs.write(
2375 bookmarksvfs.write(
2376 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2376 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2377 )
2377 )
2378 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2378 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2379
2379
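Illustrative only (not part of the diff): the journal files written by _writejournal() above are renamed on transaction completion by replacing the leading 'journal' with 'undo', which is what undofiles() relies on. A minimal sketch of that naming convention, mirroring undoname() defined near the end of this file:

import os

def _journal_to_undo(fn):
    # stand-in mirroring undoname() below; fn must start with b'journal'
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))

assert _journal_to_undo(b'journal.dirstate') == b'undo.dirstate'
assert _journal_to_undo(b'journal.phaseroots') == b'undo.phaseroots'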
2380 def recover(self):
2380 def recover(self):
2381 with self.lock():
2381 with self.lock():
2382 if self.svfs.exists(b"journal"):
2382 if self.svfs.exists(b"journal"):
2383 self.ui.status(_(b"rolling back interrupted transaction\n"))
2383 self.ui.status(_(b"rolling back interrupted transaction\n"))
2384 vfsmap = {
2384 vfsmap = {
2385 b'': self.svfs,
2385 b'': self.svfs,
2386 b'plain': self.vfs,
2386 b'plain': self.vfs,
2387 }
2387 }
2388 transaction.rollback(
2388 transaction.rollback(
2389 self.svfs,
2389 self.svfs,
2390 vfsmap,
2390 vfsmap,
2391 b"journal",
2391 b"journal",
2392 self.ui.warn,
2392 self.ui.warn,
2393 checkambigfiles=_cachedfiles,
2393 checkambigfiles=_cachedfiles,
2394 )
2394 )
2395 self.invalidate()
2395 self.invalidate()
2396 return True
2396 return True
2397 else:
2397 else:
2398 self.ui.warn(_(b"no interrupted transaction available\n"))
2398 self.ui.warn(_(b"no interrupted transaction available\n"))
2399 return False
2399 return False
2400
2400
2401 def rollback(self, dryrun=False, force=False):
2401 def rollback(self, dryrun=False, force=False):
2402 wlock = lock = dsguard = None
2402 wlock = lock = dsguard = None
2403 try:
2403 try:
2404 wlock = self.wlock()
2404 wlock = self.wlock()
2405 lock = self.lock()
2405 lock = self.lock()
2406 if self.svfs.exists(b"undo"):
2406 if self.svfs.exists(b"undo"):
2407 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2407 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2408
2408
2409 return self._rollback(dryrun, force, dsguard)
2409 return self._rollback(dryrun, force, dsguard)
2410 else:
2410 else:
2411 self.ui.warn(_(b"no rollback information available\n"))
2411 self.ui.warn(_(b"no rollback information available\n"))
2412 return 1
2412 return 1
2413 finally:
2413 finally:
2414 release(dsguard, lock, wlock)
2414 release(dsguard, lock, wlock)
2415
2415
2416 @unfilteredmethod # Until we get smarter cache management
2416 @unfilteredmethod # Until we get smarter cache management
2417 def _rollback(self, dryrun, force, dsguard):
2417 def _rollback(self, dryrun, force, dsguard):
2418 ui = self.ui
2418 ui = self.ui
2419 try:
2419 try:
2420 args = self.vfs.read(b'undo.desc').splitlines()
2420 args = self.vfs.read(b'undo.desc').splitlines()
2421 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2421 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2422 if len(args) >= 3:
2422 if len(args) >= 3:
2423 detail = args[2]
2423 detail = args[2]
2424 oldtip = oldlen - 1
2424 oldtip = oldlen - 1
2425
2425
2426 if detail and ui.verbose:
2426 if detail and ui.verbose:
2427 msg = _(
2427 msg = _(
2428 b'repository tip rolled back to revision %d'
2428 b'repository tip rolled back to revision %d'
2429 b' (undo %s: %s)\n'
2429 b' (undo %s: %s)\n'
2430 ) % (oldtip, desc, detail)
2430 ) % (oldtip, desc, detail)
2431 else:
2431 else:
2432 msg = _(
2432 msg = _(
2433 b'repository tip rolled back to revision %d (undo %s)\n'
2433 b'repository tip rolled back to revision %d (undo %s)\n'
2434 ) % (oldtip, desc)
2434 ) % (oldtip, desc)
2435 except IOError:
2435 except IOError:
2436 msg = _(b'rolling back unknown transaction\n')
2436 msg = _(b'rolling back unknown transaction\n')
2437 desc = None
2437 desc = None
2438
2438
2439 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2439 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2440 raise error.Abort(
2440 raise error.Abort(
2441 _(
2441 _(
2442 b'rollback of last commit while not checked out '
2442 b'rollback of last commit while not checked out '
2443 b'may lose data'
2443 b'may lose data'
2444 ),
2444 ),
2445 hint=_(b'use -f to force'),
2445 hint=_(b'use -f to force'),
2446 )
2446 )
2447
2447
2448 ui.status(msg)
2448 ui.status(msg)
2449 if dryrun:
2449 if dryrun:
2450 return 0
2450 return 0
2451
2451
2452 parents = self.dirstate.parents()
2452 parents = self.dirstate.parents()
2453 self.destroying()
2453 self.destroying()
2454 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2454 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2455 transaction.rollback(
2455 transaction.rollback(
2456 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2456 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2457 )
2457 )
2458 bookmarksvfs = bookmarks.bookmarksvfs(self)
2458 bookmarksvfs = bookmarks.bookmarksvfs(self)
2459 if bookmarksvfs.exists(b'undo.bookmarks'):
2459 if bookmarksvfs.exists(b'undo.bookmarks'):
2460 bookmarksvfs.rename(
2460 bookmarksvfs.rename(
2461 b'undo.bookmarks', b'bookmarks', checkambig=True
2461 b'undo.bookmarks', b'bookmarks', checkambig=True
2462 )
2462 )
2463 if self.svfs.exists(b'undo.phaseroots'):
2463 if self.svfs.exists(b'undo.phaseroots'):
2464 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2464 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2465 self.invalidate()
2465 self.invalidate()
2466
2466
2467 has_node = self.changelog.index.has_node
2467 has_node = self.changelog.index.has_node
2468 parentgone = any(not has_node(p) for p in parents)
2468 parentgone = any(not has_node(p) for p in parents)
2469 if parentgone:
2469 if parentgone:
2470 # prevent dirstateguard from overwriting already restored one
2470 # prevent dirstateguard from overwriting already restored one
2471 dsguard.close()
2471 dsguard.close()
2472
2472
2473 narrowspec.restorebackup(self, b'undo.narrowspec')
2473 narrowspec.restorebackup(self, b'undo.narrowspec')
2474 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2474 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2475 self.dirstate.restorebackup(None, b'undo.dirstate')
2475 self.dirstate.restorebackup(None, b'undo.dirstate')
2476 try:
2476 try:
2477 branch = self.vfs.read(b'undo.branch')
2477 branch = self.vfs.read(b'undo.branch')
2478 self.dirstate.setbranch(encoding.tolocal(branch))
2478 self.dirstate.setbranch(encoding.tolocal(branch))
2479 except IOError:
2479 except IOError:
2480 ui.warn(
2480 ui.warn(
2481 _(
2481 _(
2482 b'named branch could not be reset: '
2482 b'named branch could not be reset: '
2483 b'current branch is still \'%s\'\n'
2483 b'current branch is still \'%s\'\n'
2484 )
2484 )
2485 % self.dirstate.branch()
2485 % self.dirstate.branch()
2486 )
2486 )
2487
2487
2488 parents = tuple([p.rev() for p in self[None].parents()])
2488 parents = tuple([p.rev() for p in self[None].parents()])
2489 if len(parents) > 1:
2489 if len(parents) > 1:
2490 ui.status(
2490 ui.status(
2491 _(
2491 _(
2492 b'working directory now based on '
2492 b'working directory now based on '
2493 b'revisions %d and %d\n'
2493 b'revisions %d and %d\n'
2494 )
2494 )
2495 % parents
2495 % parents
2496 )
2496 )
2497 else:
2497 else:
2498 ui.status(
2498 ui.status(
2499 _(b'working directory now based on revision %d\n') % parents
2499 _(b'working directory now based on revision %d\n') % parents
2500 )
2500 )
2501 mergestatemod.mergestate.clean(self, self[b'.'].node())
2501 mergestatemod.mergestate.clean(self, self[b'.'].node())
2502
2502
2503 # TODO: if we know which new heads may result from this rollback, pass
2503 # TODO: if we know which new heads may result from this rollback, pass
2504 # them to destroy(), which will prevent the branchhead cache from being
2504 # them to destroy(), which will prevent the branchhead cache from being
2505 # invalidated.
2505 # invalidated.
2506 self.destroyed()
2506 self.destroyed()
2507 return 0
2507 return 0
2508
2508
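A hedged sketch (names hypothetical) of the undo.desc format parsed at the top of _rollback() above: the first line holds the pre-transaction changelog length, the second the transaction description, and an optional third line a detail string.

def _parse_undo_desc(data):
    # data is the raw contents of .hg/undo.desc, e.g. b'42\ncommit\n'
    args = data.splitlines()
    oldlen, desc = int(args[0]), args[1]
    detail = args[2] if len(args) >= 3 else None
    return oldlen, desc, detail

assert _parse_undo_desc(b'42\ncommit\n') == (42, b'commit', None)
assert _parse_undo_desc(b'7\namend\nextra\n') == (7, b'amend', b'extra')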
2509 def _buildcacheupdater(self, newtransaction):
2509 def _buildcacheupdater(self, newtransaction):
2510 """called during transaction to build the callback updating cache
2510 """called during transaction to build the callback updating cache
2511
2511
2512 Lives on the repository to help extensions that might want to augment
2512 Lives on the repository to help extensions that might want to augment
2513 this logic. For this purpose, the created transaction is passed to the
2513 this logic. For this purpose, the created transaction is passed to the
2514 method.
2514 method.
2515 """
2515 """
2516 # we must avoid cyclic reference between repo and transaction.
2516 # we must avoid cyclic reference between repo and transaction.
2517 reporef = weakref.ref(self)
2517 reporef = weakref.ref(self)
2518
2518
2519 def updater(tr):
2519 def updater(tr):
2520 repo = reporef()
2520 repo = reporef()
2521 repo.updatecaches(tr)
2521 repo.updatecaches(tr)
2522
2522
2523 return updater
2523 return updater
2524
2524
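The weakref indirection above exists purely to break the repo <-> transaction reference cycle so destructors can run. A self-contained sketch of the pattern, with a hypothetical stand-in repo class:

import weakref

class _FakeRepo(object):  # hypothetical stand-in for localrepository
    def updatecaches(self, tr):
        print('warming caches after', tr)

repo = _FakeRepo()
reporef = weakref.ref(repo)  # the closure holds no strong reference

def updater(tr):
    r = reporef()
    if r is not None:  # the repo may already have been collected
        r.updatecaches(tr)

updater('tx-close')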
2525 @unfilteredmethod
2525 @unfilteredmethod
2526 def updatecaches(self, tr=None, full=False):
2526 def updatecaches(self, tr=None, full=False):
2527 """warm appropriate caches
2527 """warm appropriate caches
2528
2528
2529 If this function is called after a transaction closed, the transaction
2529 If this function is called after a transaction closed, the transaction
2530 will be available in the 'tr' argument. This can be used to selectively
2530 will be available in the 'tr' argument. This can be used to selectively
2531 update caches relevant to the changes in that transaction.
2531 update caches relevant to the changes in that transaction.
2532
2532
2533 If 'full' is set, make sure all caches the function knows about have
2533 If 'full' is set, make sure all caches the function knows about have
2534 up-to-date data, even the ones usually loaded more lazily.
2534 up-to-date data, even the ones usually loaded more lazily.
2535 """
2535 """
2536 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2536 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2537 # During strip, many caches are invalid but
2537 # During strip, many caches are invalid but
2538 # later call to `destroyed` will refresh them.
2538 # later call to `destroyed` will refresh them.
2539 return
2539 return
2540
2540
2541 if tr is None or tr.changes[b'origrepolen'] < len(self):
2541 if tr is None or tr.changes[b'origrepolen'] < len(self):
2542 # accessing the 'served' branchmap should refresh all the others,
2542 # accessing the 'served' branchmap should refresh all the others,
2543 self.ui.debug(b'updating the branch cache\n')
2543 self.ui.debug(b'updating the branch cache\n')
2544 self.filtered(b'served').branchmap()
2544 self.filtered(b'served').branchmap()
2545 self.filtered(b'served.hidden').branchmap()
2545 self.filtered(b'served.hidden').branchmap()
2546
2546
2547 if full:
2547 if full:
2548 unfi = self.unfiltered()
2548 unfi = self.unfiltered()
2549
2549
2550 self.changelog.update_caches(transaction=tr)
2550 self.changelog.update_caches(transaction=tr)
2551 self.manifestlog.update_caches(transaction=tr)
2551 self.manifestlog.update_caches(transaction=tr)
2552
2552
2553 rbc = unfi.revbranchcache()
2553 rbc = unfi.revbranchcache()
2554 for r in unfi.changelog:
2554 for r in unfi.changelog:
2555 rbc.branchinfo(r)
2555 rbc.branchinfo(r)
2556 rbc.write()
2556 rbc.write()
2557
2557
2558 # ensure the working copy parents are in the manifestfulltextcache
2558 # ensure the working copy parents are in the manifestfulltextcache
2559 for ctx in self[b'.'].parents():
2559 for ctx in self[b'.'].parents():
2560 ctx.manifest() # accessing the manifest is enough
2560 ctx.manifest() # accessing the manifest is enough
2561
2561
2562 # accessing fnode cache warms the cache
2562 # accessing fnode cache warms the cache
2563 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2563 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2564 # accessing tags warms the cache
2564 # accessing tags warms the cache
2565 self.tags()
2565 self.tags()
2566 self.filtered(b'served').tags()
2566 self.filtered(b'served').tags()
2567
2567
2568 # The `full` arg is documented as updating even the lazily-loaded
2568 # The `full` arg is documented as updating even the lazily-loaded
2569 # caches immediately, so we're forcing a write to cause these caches
2569 # caches immediately, so we're forcing a write to cause these caches
2570 # to be warmed up even if they haven't explicitly been requested
2570 # to be warmed up even if they haven't explicitly been requested
2571 # yet (if they've never been used by hg, they won't ever have been
2571 # yet (if they've never been used by hg, they won't ever have been
2572 # written, even if they're a subset of another kind of cache that
2572 # written, even if they're a subset of another kind of cache that
2573 # *has* been used).
2573 # *has* been used).
2574 for filt in repoview.filtertable.keys():
2574 for filt in repoview.filtertable.keys():
2575 filtered = self.filtered(filt)
2575 filtered = self.filtered(filt)
2576 filtered.branchmap().write(filtered)
2576 filtered.branchmap().write(filtered)
2577
2577
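A hedged restatement of the guard at the top of the incremental path in updatecaches(): branch caches are warmed when called outside a transaction, or when the transaction actually added revisions. Below, a plain dict stands in for the real tr.changes mapping:

def _should_warm(tr_changes, repolen):
    # tr_changes is None (no transaction) or {'origrepolen': int}
    return tr_changes is None or tr_changes['origrepolen'] < repolen

assert _should_warm(None, 5)
assert _should_warm({'origrepolen': 3}, 5)      # revisions were added
assert not _should_warm({'origrepolen': 5}, 5)  # nothing new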
2578 def invalidatecaches(self):
2578 def invalidatecaches(self):
2579
2579
2580 if '_tagscache' in vars(self):
2580 if '_tagscache' in vars(self):
2581 # can't use delattr on proxy
2581 # can't use delattr on proxy
2582 del self.__dict__['_tagscache']
2582 del self.__dict__['_tagscache']
2583
2583
2584 self._branchcaches.clear()
2584 self._branchcaches.clear()
2585 self.invalidatevolatilesets()
2585 self.invalidatevolatilesets()
2586 self._sparsesignaturecache.clear()
2586 self._sparsesignaturecache.clear()
2587
2587
2588 def invalidatevolatilesets(self):
2588 def invalidatevolatilesets(self):
2589 self.filteredrevcache.clear()
2589 self.filteredrevcache.clear()
2590 obsolete.clearobscaches(self)
2590 obsolete.clearobscaches(self)
2591 self._quick_access_changeid_invalidate()
2591 self._quick_access_changeid_invalidate()
2592
2592
2593 def invalidatedirstate(self):
2593 def invalidatedirstate(self):
2594 '''Invalidates the dirstate, causing the next call to dirstate
2594 '''Invalidates the dirstate, causing the next call to dirstate
2595 to check if it was modified since the last time it was read,
2595 to check if it was modified since the last time it was read,
2596 rereading it if it has.
2596 rereading it if it has.
2597
2597
2598 This is different from dirstate.invalidate() in that it doesn't always
2598 This is different from dirstate.invalidate() in that it doesn't always
2599 reread the dirstate. Use dirstate.invalidate() if you want to
2599 reread the dirstate. Use dirstate.invalidate() if you want to
2600 explicitly read the dirstate again (i.e. restoring it to a previous
2600 explicitly read the dirstate again (i.e. restoring it to a previous
2601 known good state).'''
2601 known good state).'''
2602 if hasunfilteredcache(self, 'dirstate'):
2602 if hasunfilteredcache(self, 'dirstate'):
2603 for k in self.dirstate._filecache:
2603 for k in self.dirstate._filecache:
2604 try:
2604 try:
2605 delattr(self.dirstate, k)
2605 delattr(self.dirstate, k)
2606 except AttributeError:
2606 except AttributeError:
2607 pass
2607 pass
2608 delattr(self.unfiltered(), 'dirstate')
2608 delattr(self.unfiltered(), 'dirstate')
2609
2609
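Illustrative only: the try/except delattr idiom used above drops cached attributes that may or may not have been materialized yet. A self-contained sketch:

class _Cache(object):  # hypothetical
    pass

c = _Cache()
c.loaded = 1
for attr in ('loaded', 'never_loaded'):
    try:
        delattr(c, attr)  # discard the cached value if present
    except AttributeError:
        pass  # was never cached; nothing to invalidate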
2610 def invalidate(self, clearfilecache=False):
2610 def invalidate(self, clearfilecache=False):
2611 '''Invalidates both store and non-store parts other than dirstate
2611 '''Invalidates both store and non-store parts other than dirstate
2612
2612
2613 If a transaction is running, invalidation of store is omitted,
2613 If a transaction is running, invalidation of store is omitted,
2614 because discarding in-memory changes might cause inconsistency
2614 because discarding in-memory changes might cause inconsistency
2615 (e.g. incomplete fncache causes unintentional failure, but
2615 (e.g. incomplete fncache causes unintentional failure, but
2616 a redundant one doesn't).
2616 a redundant one doesn't).
2617 '''
2617 '''
2618 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2618 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2619 for k in list(self._filecache.keys()):
2619 for k in list(self._filecache.keys()):
2620 # dirstate is invalidated separately in invalidatedirstate()
2620 # dirstate is invalidated separately in invalidatedirstate()
2621 if k == b'dirstate':
2621 if k == b'dirstate':
2622 continue
2622 continue
2623 if (
2623 if (
2624 k == b'changelog'
2624 k == b'changelog'
2625 and self.currenttransaction()
2625 and self.currenttransaction()
2626 and self.changelog._delayed
2626 and self.changelog._delayed
2627 ):
2627 ):
2628 # The changelog object may store unwritten revisions. We don't
2628 # The changelog object may store unwritten revisions. We don't
2629 # want to lose them.
2629 # want to lose them.
2630 # TODO: Solve the problem instead of working around it.
2630 # TODO: Solve the problem instead of working around it.
2631 continue
2631 continue
2632
2632
2633 if clearfilecache:
2633 if clearfilecache:
2634 del self._filecache[k]
2634 del self._filecache[k]
2635 try:
2635 try:
2636 delattr(unfiltered, k)
2636 delattr(unfiltered, k)
2637 except AttributeError:
2637 except AttributeError:
2638 pass
2638 pass
2639 self.invalidatecaches()
2639 self.invalidatecaches()
2640 if not self.currenttransaction():
2640 if not self.currenttransaction():
2641 # TODO: Changing contents of store outside transaction
2641 # TODO: Changing contents of store outside transaction
2642 # causes inconsistency. We should make in-memory store
2642 # causes inconsistency. We should make in-memory store
2643 # changes detectable, and abort if changed.
2643 # changes detectable, and abort if changed.
2644 self.store.invalidatecaches()
2644 self.store.invalidatecaches()
2645
2645
2646 def invalidateall(self):
2646 def invalidateall(self):
2647 '''Fully invalidates both store and non-store parts, causing the
2647 '''Fully invalidates both store and non-store parts, causing the
2648 subsequent operation to reread any outside changes.'''
2648 subsequent operation to reread any outside changes.'''
2649 # extension should hook this to invalidate its caches
2649 # extension should hook this to invalidate its caches
2650 self.invalidate()
2650 self.invalidate()
2651 self.invalidatedirstate()
2651 self.invalidatedirstate()
2652
2652
2653 @unfilteredmethod
2653 @unfilteredmethod
2654 def _refreshfilecachestats(self, tr):
2654 def _refreshfilecachestats(self, tr):
2655 """Reload stats of cached files so that they are flagged as valid"""
2655 """Reload stats of cached files so that they are flagged as valid"""
2656 for k, ce in self._filecache.items():
2656 for k, ce in self._filecache.items():
2657 k = pycompat.sysstr(k)
2657 k = pycompat.sysstr(k)
2658 if k == 'dirstate' or k not in self.__dict__:
2658 if k == 'dirstate' or k not in self.__dict__:
2659 continue
2659 continue
2660 ce.refresh()
2660 ce.refresh()
2661
2661
2662 def _lock(
2662 def _lock(
2663 self,
2663 self,
2664 vfs,
2664 vfs,
2665 lockname,
2665 lockname,
2666 wait,
2666 wait,
2667 releasefn,
2667 releasefn,
2668 acquirefn,
2668 acquirefn,
2669 desc,
2669 desc,
2670 inheritchecker=None,
2670 inheritchecker=None,
2671 parentenvvar=None,
2671 parentenvvar=None,
2672 ):
2672 ):
2673 parentlock = None
2673 parentlock = None
2674 # the contents of parentenvvar are used by the underlying lock to
2674 # the contents of parentenvvar are used by the underlying lock to
2675 # determine whether it can be inherited
2675 # determine whether it can be inherited
2676 if parentenvvar is not None:
2676 if parentenvvar is not None:
2677 parentlock = encoding.environ.get(parentenvvar)
2677 parentlock = encoding.environ.get(parentenvvar)
2678
2678
2679 timeout = 0
2679 timeout = 0
2680 warntimeout = 0
2680 warntimeout = 0
2681 if wait:
2681 if wait:
2682 timeout = self.ui.configint(b"ui", b"timeout")
2682 timeout = self.ui.configint(b"ui", b"timeout")
2683 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2683 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2684 # internal config: ui.signal-safe-lock
2684 # internal config: ui.signal-safe-lock
2685 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2685 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2686
2686
2687 l = lockmod.trylock(
2687 l = lockmod.trylock(
2688 self.ui,
2688 self.ui,
2689 vfs,
2689 vfs,
2690 lockname,
2690 lockname,
2691 timeout,
2691 timeout,
2692 warntimeout,
2692 warntimeout,
2693 releasefn=releasefn,
2693 releasefn=releasefn,
2694 acquirefn=acquirefn,
2694 acquirefn=acquirefn,
2695 desc=desc,
2695 desc=desc,
2696 inheritchecker=inheritchecker,
2696 inheritchecker=inheritchecker,
2697 parentlock=parentlock,
2697 parentlock=parentlock,
2698 signalsafe=signalsafe,
2698 signalsafe=signalsafe,
2699 )
2699 )
2700 return l
2700 return l
2701
2701
2702 def _afterlock(self, callback):
2702 def _afterlock(self, callback):
2703 """add a callback to be run when the repository is fully unlocked
2703 """add a callback to be run when the repository is fully unlocked
2704
2704
2705 The callback will be executed when the outermost lock is released
2705 The callback will be executed when the outermost lock is released
2706 (with wlock being higher level than 'lock')."""
2706 (with wlock being higher level than 'lock')."""
2707 for ref in (self._wlockref, self._lockref):
2707 for ref in (self._wlockref, self._lockref):
2708 l = ref and ref()
2708 l = ref and ref()
2709 if l and l.held:
2709 if l and l.held:
2710 l.postrelease.append(callback)
2710 l.postrelease.append(callback)
2711 break
2711 break
2712 else: # no lock has been found.
2712 else: # no lock has been found.
2713 callback(True)
2713 callback(True)
2714
2714
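A minimal sketch (with hypothetical lock objects) of the _afterlock() contract above: the callback is attached to the postrelease list of the outermost held lock, and runs immediately with success=True when no lock is held:

class _Lock(object):  # hypothetical
    def __init__(self, held):
        self.held = held
        self.postrelease = []

def afterlock(wlock, lock, callback):
    for l in (wlock, lock):  # wlock is the outermost lock
        if l is not None and l.held:
            l.postrelease.append(callback)
            return
    callback(True)  # no lock held: run right away

ran = []
afterlock(None, _Lock(held=True), ran.append)
assert not ran  # deferred until the lock is actually released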
2715 def lock(self, wait=True):
2715 def lock(self, wait=True):
2716 '''Lock the repository store (.hg/store) and return a weak reference
2716 '''Lock the repository store (.hg/store) and return a weak reference
2717 to the lock. Use this before modifying the store (e.g. committing or
2717 to the lock. Use this before modifying the store (e.g. committing or
2718 stripping). If you are opening a transaction, get a lock as well.
2718 stripping). If you are opening a transaction, get a lock as well.
2719
2719
2720 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2720 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2721 'wlock' first to avoid a dead-lock hazard.'''
2721 'wlock' first to avoid a dead-lock hazard.'''
2722 l = self._currentlock(self._lockref)
2722 l = self._currentlock(self._lockref)
2723 if l is not None:
2723 if l is not None:
2724 l.lock()
2724 l.lock()
2725 return l
2725 return l
2726
2726
2727 l = self._lock(
2727 l = self._lock(
2728 vfs=self.svfs,
2728 vfs=self.svfs,
2729 lockname=b"lock",
2729 lockname=b"lock",
2730 wait=wait,
2730 wait=wait,
2731 releasefn=None,
2731 releasefn=None,
2732 acquirefn=self.invalidate,
2732 acquirefn=self.invalidate,
2733 desc=_(b'repository %s') % self.origroot,
2733 desc=_(b'repository %s') % self.origroot,
2734 )
2734 )
2735 self._lockref = weakref.ref(l)
2735 self._lockref = weakref.ref(l)
2736 return l
2736 return l
2737
2737
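A hedged usage sketch of the documented ordering (take 'wlock' before 'lock' to avoid an ABBA deadlock between two processes), matching how commit() below acquires both:

def locked_store_update(repo):
    # repo is any localrepository instance; both locks release on exit
    with repo.wlock(), repo.lock():
        pass  # safe to modify working-copy state and the store here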
2738 def _wlockchecktransaction(self):
2738 def _wlockchecktransaction(self):
2739 if self.currenttransaction() is not None:
2739 if self.currenttransaction() is not None:
2740 raise error.LockInheritanceContractViolation(
2740 raise error.LockInheritanceContractViolation(
2741 b'wlock cannot be inherited in the middle of a transaction'
2741 b'wlock cannot be inherited in the middle of a transaction'
2742 )
2742 )
2743
2743
2744 def wlock(self, wait=True):
2744 def wlock(self, wait=True):
2745 '''Lock the non-store parts of the repository (everything under
2745 '''Lock the non-store parts of the repository (everything under
2746 .hg except .hg/store) and return a weak reference to the lock.
2746 .hg except .hg/store) and return a weak reference to the lock.
2747
2747
2748 Use this before modifying files in .hg.
2748 Use this before modifying files in .hg.
2749
2749
2750 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2750 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2751 'wlock' first to avoid a dead-lock hazard.'''
2751 'wlock' first to avoid a dead-lock hazard.'''
2752 l = self._wlockref and self._wlockref()
2752 l = self._wlockref and self._wlockref()
2753 if l is not None and l.held:
2753 if l is not None and l.held:
2754 l.lock()
2754 l.lock()
2755 return l
2755 return l
2756
2756
2757 # We do not need to check for non-waiting lock acquisition. Such
2757 # We do not need to check for non-waiting lock acquisition. Such
2758 # acquisition would not cause dead-lock as they would just fail.
2758 # acquisition would not cause dead-lock as they would just fail.
2759 if wait and (
2759 if wait and (
2760 self.ui.configbool(b'devel', b'all-warnings')
2760 self.ui.configbool(b'devel', b'all-warnings')
2761 or self.ui.configbool(b'devel', b'check-locks')
2761 or self.ui.configbool(b'devel', b'check-locks')
2762 ):
2762 ):
2763 if self._currentlock(self._lockref) is not None:
2763 if self._currentlock(self._lockref) is not None:
2764 self.ui.develwarn(b'"wlock" acquired after "lock"')
2764 self.ui.develwarn(b'"wlock" acquired after "lock"')
2765
2765
2766 def unlock():
2766 def unlock():
2767 if self.dirstate.pendingparentchange():
2767 if self.dirstate.pendingparentchange():
2768 self.dirstate.invalidate()
2768 self.dirstate.invalidate()
2769 else:
2769 else:
2770 self.dirstate.write(None)
2770 self.dirstate.write(None)
2771
2771
2772 self._filecache[b'dirstate'].refresh()
2772 self._filecache[b'dirstate'].refresh()
2773
2773
2774 l = self._lock(
2774 l = self._lock(
2775 self.vfs,
2775 self.vfs,
2776 b"wlock",
2776 b"wlock",
2777 wait,
2777 wait,
2778 unlock,
2778 unlock,
2779 self.invalidatedirstate,
2779 self.invalidatedirstate,
2780 _(b'working directory of %s') % self.origroot,
2780 _(b'working directory of %s') % self.origroot,
2781 inheritchecker=self._wlockchecktransaction,
2781 inheritchecker=self._wlockchecktransaction,
2782 parentenvvar=b'HG_WLOCK_LOCKER',
2782 parentenvvar=b'HG_WLOCK_LOCKER',
2783 )
2783 )
2784 self._wlockref = weakref.ref(l)
2784 self._wlockref = weakref.ref(l)
2785 return l
2785 return l
2786
2786
2787 def _currentlock(self, lockref):
2787 def _currentlock(self, lockref):
2788 """Returns the lock if it's held, or None if it's not."""
2788 """Returns the lock if it's held, or None if it's not."""
2789 if lockref is None:
2789 if lockref is None:
2790 return None
2790 return None
2791 l = lockref()
2791 l = lockref()
2792 if l is None or not l.held:
2792 if l is None or not l.held:
2793 return None
2793 return None
2794 return l
2794 return l
2795
2795
2796 def currentwlock(self):
2796 def currentwlock(self):
2797 """Returns the wlock if it's held, or None if it's not."""
2797 """Returns the wlock if it's held, or None if it's not."""
2798 return self._currentlock(self._wlockref)
2798 return self._currentlock(self._wlockref)
2799
2799
2800 def checkcommitpatterns(self, wctx, match, status, fail):
2800 def checkcommitpatterns(self, wctx, match, status, fail):
2801 """check for commit arguments that aren't committable"""
2801 """check for commit arguments that aren't committable"""
2802 if match.isexact() or match.prefix():
2802 if match.isexact() or match.prefix():
2803 matched = set(status.modified + status.added + status.removed)
2803 matched = set(status.modified + status.added + status.removed)
2804
2804
2805 for f in match.files():
2805 for f in match.files():
2806 f = self.dirstate.normalize(f)
2806 f = self.dirstate.normalize(f)
2807 if f == b'.' or f in matched or f in wctx.substate:
2807 if f == b'.' or f in matched or f in wctx.substate:
2808 continue
2808 continue
2809 if f in status.deleted:
2809 if f in status.deleted:
2810 fail(f, _(b'file not found!'))
2810 fail(f, _(b'file not found!'))
2811 # Is it a directory that exists or used to exist?
2811 # Is it a directory that exists or used to exist?
2812 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
2812 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
2813 d = f + b'/'
2813 d = f + b'/'
2814 for mf in matched:
2814 for mf in matched:
2815 if mf.startswith(d):
2815 if mf.startswith(d):
2816 break
2816 break
2817 else:
2817 else:
2818 fail(f, _(b"no match under directory!"))
2818 fail(f, _(b"no match under directory!"))
2819 elif f not in self.dirstate:
2819 elif f not in self.dirstate:
2820 fail(f, _(b"file not tracked!"))
2820 fail(f, _(b"file not tracked!"))
2821
2821
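The directory check in the loop above reduces to a prefix test; a small self-contained sketch (names hypothetical):

def _matches_under_dir(f, matched):
    # is any committed path located under directory f?
    d = f + b'/'
    return any(mf.startswith(d) for mf in matched)

assert _matches_under_dir(b'src', {b'src/a.py', b'README'})
assert not _matches_under_dir(b'docs', {b'src/a.py'})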
2822 @unfilteredmethod
2822 @unfilteredmethod
2823 def commit(
2823 def commit(
2824 self,
2824 self,
2825 text=b"",
2825 text=b"",
2826 user=None,
2826 user=None,
2827 date=None,
2827 date=None,
2828 match=None,
2828 match=None,
2829 force=False,
2829 force=False,
2830 editor=None,
2830 editor=None,
2831 extra=None,
2831 extra=None,
2832 ):
2832 ):
2833 """Add a new revision to current repository.
2833 """Add a new revision to current repository.
2834
2834
2835 Revision information is gathered from the working directory,
2835 Revision information is gathered from the working directory,
2836 match can be used to filter the committed files. If editor is
2836 match can be used to filter the committed files. If editor is
2837 supplied, it is called to get a commit message.
2837 supplied, it is called to get a commit message.
2838 """
2838 """
2839 if extra is None:
2839 if extra is None:
2840 extra = {}
2840 extra = {}
2841
2841
2842 def fail(f, msg):
2842 def fail(f, msg):
2843 raise error.Abort(b'%s: %s' % (f, msg))
2843 raise error.Abort(b'%s: %s' % (f, msg))
2844
2844
2845 if not match:
2845 if not match:
2846 match = matchmod.always()
2846 match = matchmod.always()
2847
2847
2848 if not force:
2848 if not force:
2849 match.bad = fail
2849 match.bad = fail
2850
2850
2851 # lock() for recent changelog (see issue4368)
2851 # lock() for recent changelog (see issue4368)
2852 with self.wlock(), self.lock():
2852 with self.wlock(), self.lock():
2853 wctx = self[None]
2853 wctx = self[None]
2854 merge = len(wctx.parents()) > 1
2854 merge = len(wctx.parents()) > 1
2855
2855
2856 if not force and merge and not match.always():
2856 if not force and merge and not match.always():
2857 raise error.Abort(
2857 raise error.Abort(
2858 _(
2858 _(
2859 b'cannot partially commit a merge '
2859 b'cannot partially commit a merge '
2860 b'(do not specify files or patterns)'
2860 b'(do not specify files or patterns)'
2861 )
2861 )
2862 )
2862 )
2863
2863
2864 status = self.status(match=match, clean=force)
2864 status = self.status(match=match, clean=force)
2865 if force:
2865 if force:
2866 status.modified.extend(
2866 status.modified.extend(
2867 status.clean
2867 status.clean
2868 ) # mq may commit clean files
2868 ) # mq may commit clean files
2869
2869
2870 # check subrepos
2870 # check subrepos
2871 subs, commitsubs, newstate = subrepoutil.precommit(
2871 subs, commitsubs, newstate = subrepoutil.precommit(
2872 self.ui, wctx, status, match, force=force
2872 self.ui, wctx, status, match, force=force
2873 )
2873 )
2874
2874
2875 # make sure all explicit patterns are matched
2875 # make sure all explicit patterns are matched
2876 if not force:
2876 if not force:
2877 self.checkcommitpatterns(wctx, match, status, fail)
2877 self.checkcommitpatterns(wctx, match, status, fail)
2878
2878
2879 cctx = context.workingcommitctx(
2879 cctx = context.workingcommitctx(
2880 self, status, text, user, date, extra
2880 self, status, text, user, date, extra
2881 )
2881 )
2882
2882
2883 ms = mergestatemod.mergestate.read(self)
2883 ms = mergestatemod.mergestate.read(self)
2884 mergeutil.checkunresolved(ms)
2884 mergeutil.checkunresolved(ms)
2885
2885
2886 # internal config: ui.allowemptycommit
2886 # internal config: ui.allowemptycommit
2887 if cctx.isempty() and not self.ui.configbool(
2887 if cctx.isempty() and not self.ui.configbool(
2888 b'ui', b'allowemptycommit'
2888 b'ui', b'allowemptycommit'
2889 ):
2889 ):
2890 self.ui.debug(b'nothing to commit, clearing merge state\n')
2890 self.ui.debug(b'nothing to commit, clearing merge state\n')
2891 ms.reset()
2891 ms.reset()
2892 return None
2892 return None
2893
2893
2894 if merge and cctx.deleted():
2894 if merge and cctx.deleted():
2895 raise error.Abort(_(b"cannot commit merge with missing files"))
2895 raise error.Abort(_(b"cannot commit merge with missing files"))
2896
2896
2897 if editor:
2897 if editor:
2898 cctx._text = editor(self, cctx, subs)
2898 cctx._text = editor(self, cctx, subs)
2899 edited = text != cctx._text
2899 edited = text != cctx._text
2900
2900
2901 # Save commit message in case this transaction gets rolled back
2901 # Save commit message in case this transaction gets rolled back
2902 # (e.g. by a pretxncommit hook). Leave the content alone on
2902 # (e.g. by a pretxncommit hook). Leave the content alone on
2903 # the assumption that the user will use the same editor again.
2903 # the assumption that the user will use the same editor again.
2904 msgfn = self.savecommitmessage(cctx._text)
2904 msgfn = self.savecommitmessage(cctx._text)
2905
2905
2906 # commit subs and write new state
2906 # commit subs and write new state
2907 if subs:
2907 if subs:
2908 uipathfn = scmutil.getuipathfn(self)
2908 uipathfn = scmutil.getuipathfn(self)
2909 for s in sorted(commitsubs):
2909 for s in sorted(commitsubs):
2910 sub = wctx.sub(s)
2910 sub = wctx.sub(s)
2911 self.ui.status(
2911 self.ui.status(
2912 _(b'committing subrepository %s\n')
2912 _(b'committing subrepository %s\n')
2913 % uipathfn(subrepoutil.subrelpath(sub))
2913 % uipathfn(subrepoutil.subrelpath(sub))
2914 )
2914 )
2915 sr = sub.commit(cctx._text, user, date)
2915 sr = sub.commit(cctx._text, user, date)
2916 newstate[s] = (newstate[s][0], sr)
2916 newstate[s] = (newstate[s][0], sr)
2917 subrepoutil.writestate(self, newstate)
2917 subrepoutil.writestate(self, newstate)
2918
2918
2919 p1, p2 = self.dirstate.parents()
2919 p1, p2 = self.dirstate.parents()
2920 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
2920 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
2921 try:
2921 try:
2922 self.hook(
2922 self.hook(
2923 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
2923 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
2924 )
2924 )
2925 with self.transaction(b'commit'):
2925 with self.transaction(b'commit'):
2926 ret = self.commitctx(cctx, True)
2926 ret = self.commitctx(cctx, True)
2927 # update bookmarks, dirstate and mergestate
2927 # update bookmarks, dirstate and mergestate
2928 bookmarks.update(self, [p1, p2], ret)
2928 bookmarks.update(self, [p1, p2], ret)
2929 cctx.markcommitted(ret)
2929 cctx.markcommitted(ret)
2930 ms.reset()
2930 ms.reset()
2931 except: # re-raises
2931 except: # re-raises
2932 if edited:
2932 if edited:
2933 self.ui.write(
2933 self.ui.write(
2934 _(b'note: commit message saved in %s\n') % msgfn
2934 _(b'note: commit message saved in %s\n') % msgfn
2935 )
2935 )
2936 self.ui.write(
2936 self.ui.write(
2937 _(
2937 _(
2938 b"note: use 'hg commit --logfile "
2938 b"note: use 'hg commit --logfile "
2939 b".hg/last-message.txt --edit' to reuse it\n"
2939 b".hg/last-message.txt --edit' to reuse it\n"
2940 )
2940 )
2941 )
2941 )
2942 raise
2942 raise
2943
2943
2944 def commithook(unused_success):
2944 def commithook(unused_success):
2945 # hack for commands that use a temporary commit (e.g. histedit):
2945 # hack for commands that use a temporary commit (e.g. histedit):
2946 # the temporary commit may have been stripped before the hook runs
2946 # the temporary commit may have been stripped before the hook runs
2947 if self.changelog.hasnode(ret):
2947 if self.changelog.hasnode(ret):
2948 self.hook(
2948 self.hook(
2949 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
2949 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
2950 )
2950 )
2951
2951
2952 self._afterlock(commithook)
2952 self._afterlock(commithook)
2953 return ret
2953 return ret
2954
2954
2955 @unfilteredmethod
2955 @unfilteredmethod
2956 def commitctx(self, ctx, error=False, origctx=None):
2956 def commitctx(self, ctx, error=False, origctx=None):
2957 return commit.commitctx(self, ctx, error=error, origctx=origctx)
2957 return commit.commitctx(self, ctx, error=error, origctx=origctx)
2958
2958
2959 @unfilteredmethod
2959 @unfilteredmethod
2960 def destroying(self):
2960 def destroying(self):
2961 '''Inform the repository that nodes are about to be destroyed.
2961 '''Inform the repository that nodes are about to be destroyed.
2962 Intended for use by strip and rollback, so there's a common
2962 Intended for use by strip and rollback, so there's a common
2963 place for anything that has to be done before destroying history.
2963 place for anything that has to be done before destroying history.
2964
2964
2965 This is mostly useful for saving state that is in memory and waiting
2965 This is mostly useful for saving state that is in memory and waiting
2966 to be flushed when the current lock is released. Because a call to
2966 to be flushed when the current lock is released. Because a call to
2967 destroyed is imminent, the repo will be invalidated causing those
2967 destroyed is imminent, the repo will be invalidated causing those
2968 changes to stay in memory (waiting for the next unlock), or vanish
2968 changes to stay in memory (waiting for the next unlock), or vanish
2969 completely.
2969 completely.
2970 '''
2970 '''
2971 # When using the same lock to commit and strip, the phasecache is left
2971 # When using the same lock to commit and strip, the phasecache is left
2972 # dirty after committing. Then when we strip, the repo is invalidated,
2972 # dirty after committing. Then when we strip, the repo is invalidated,
2973 # causing those changes to disappear.
2973 # causing those changes to disappear.
2974 if '_phasecache' in vars(self):
2974 if '_phasecache' in vars(self):
2975 self._phasecache.write()
2975 self._phasecache.write()
2976
2976
2977 @unfilteredmethod
2977 @unfilteredmethod
2978 def destroyed(self):
2978 def destroyed(self):
2979 '''Inform the repository that nodes have been destroyed.
2979 '''Inform the repository that nodes have been destroyed.
2980 Intended for use by strip and rollback, so there's a common
2980 Intended for use by strip and rollback, so there's a common
2981 place for anything that has to be done after destroying history.
2981 place for anything that has to be done after destroying history.
2982 '''
2982 '''
2983 # When one tries to:
2983 # When one tries to:
2984 # 1) destroy nodes thus calling this method (e.g. strip)
2984 # 1) destroy nodes thus calling this method (e.g. strip)
2985 # 2) use phasecache somewhere (e.g. commit)
2985 # 2) use phasecache somewhere (e.g. commit)
2986 #
2986 #
2987 # then 2) will fail because the phasecache contains nodes that were
2987 # then 2) will fail because the phasecache contains nodes that were
2988 # removed. We can either remove phasecache from the filecache,
2988 # removed. We can either remove phasecache from the filecache,
2989 # causing it to reload next time it is accessed, or simply filter
2989 # causing it to reload next time it is accessed, or simply filter
2990 # the removed nodes now and write the updated cache.
2990 # the removed nodes now and write the updated cache.
2991 self._phasecache.filterunknown(self)
2991 self._phasecache.filterunknown(self)
2992 self._phasecache.write()
2992 self._phasecache.write()
2993
2993
2994 # refresh all repository caches
2994 # refresh all repository caches
2995 self.updatecaches()
2995 self.updatecaches()
2996
2996
2997 # Ensure the persistent tag cache is updated. Doing it now
2997 # Ensure the persistent tag cache is updated. Doing it now
2998 # means that the tag cache only has to worry about destroyed
2998 # means that the tag cache only has to worry about destroyed
2999 # heads immediately after a strip/rollback. That in turn
2999 # heads immediately after a strip/rollback. That in turn
3000 # guarantees that "cachetip == currenttip" (comparing both rev
3000 # guarantees that "cachetip == currenttip" (comparing both rev
3001 # and node) always means no nodes have been added or destroyed.
3001 # and node) always means no nodes have been added or destroyed.
3002
3002
3003 # XXX this is suboptimal when qrefresh'ing: we strip the current
3003 # XXX this is suboptimal when qrefresh'ing: we strip the current
3004 # head, refresh the tag cache, then immediately add a new head.
3004 # head, refresh the tag cache, then immediately add a new head.
3005 # But I think doing it this way is necessary for the "instant
3005 # But I think doing it this way is necessary for the "instant
3006 # tag cache retrieval" case to work.
3006 # tag cache retrieval" case to work.
3007 self.invalidate()
3007 self.invalidate()
3008
3008
3009 def status(
3009 def status(
3010 self,
3010 self,
3011 node1=b'.',
3011 node1=b'.',
3012 node2=None,
3012 node2=None,
3013 match=None,
3013 match=None,
3014 ignored=False,
3014 ignored=False,
3015 clean=False,
3015 clean=False,
3016 unknown=False,
3016 unknown=False,
3017 listsubrepos=False,
3017 listsubrepos=False,
3018 ):
3018 ):
3019 '''a convenience method that calls node1.status(node2)'''
3019 '''a convenience method that calls node1.status(node2)'''
3020 return self[node1].status(
3020 return self[node1].status(
3021 node2, match, ignored, clean, unknown, listsubrepos
3021 node2, match, ignored, clean, unknown, listsubrepos
3022 )
3022 )
3023
3023
3024 def addpostdsstatus(self, ps):
3024 def addpostdsstatus(self, ps):
3025 """Add a callback to run within the wlock, at the point at which status
3025 """Add a callback to run within the wlock, at the point at which status
3026 fixups happen.
3026 fixups happen.
3027
3027
3028 On status completion, callback(wctx, status) will be called with the
3028 On status completion, callback(wctx, status) will be called with the
3029 wlock held, unless the dirstate has changed from underneath or the wlock
3029 wlock held, unless the dirstate has changed from underneath or the wlock
3030 couldn't be grabbed.
3030 couldn't be grabbed.
3031
3031
3032 Callbacks should not capture and use a cached copy of the dirstate --
3032 Callbacks should not capture and use a cached copy of the dirstate --
3033 it might change in the meantime. Instead, they should access the
3033 it might change in the meantime. Instead, they should access the
3034 dirstate via wctx.repo().dirstate.
3034 dirstate via wctx.repo().dirstate.
3035
3035
3036 This list is emptied out after each status run -- extensions should
3036 This list is emptied out after each status run -- extensions should
3037 make sure they add to this list each time dirstate.status is called.
3037 make sure they add to this list each time dirstate.status is called.
3038 Extensions should also make sure they don't call this for statuses
3038 Extensions should also make sure they don't call this for statuses
3039 that don't involve the dirstate.
3039 that don't involve the dirstate.
3040 """
3040 """
3041
3041
3042 # The list is located here for uniqueness reasons -- it is actually
3042 # The list is located here for uniqueness reasons -- it is actually
3043 # managed by the workingctx, but that isn't unique per-repo.
3043 # managed by the workingctx, but that isn't unique per-repo.
3044 self._postdsstatus.append(ps)
3044 self._postdsstatus.append(ps)
3045
3045
3046 def postdsstatus(self):
3046 def postdsstatus(self):
3047 """Used by workingctx to get the list of post-dirstate-status hooks."""
3047 """Used by workingctx to get the list of post-dirstate-status hooks."""
3048 return self._postdsstatus
3048 return self._postdsstatus
3049
3049
3050 def clearpostdsstatus(self):
3050 def clearpostdsstatus(self):
3051 """Used by workingctx to clear post-dirstate-status hooks."""
3051 """Used by workingctx to clear post-dirstate-status hooks."""
3052 del self._postdsstatus[:]
3052 del self._postdsstatus[:]
3053
3053
3054 def heads(self, start=None):
3054 def heads(self, start=None):
3055 if start is None:
3055 if start is None:
3056 cl = self.changelog
3056 cl = self.changelog
3057 headrevs = reversed(cl.headrevs())
3057 headrevs = reversed(cl.headrevs())
3058 return [cl.node(rev) for rev in headrevs]
3058 return [cl.node(rev) for rev in headrevs]
3059
3059
3060 heads = self.changelog.heads(start)
3060 heads = self.changelog.heads(start)
3061 # sort the output in rev descending order
3061 # sort the output in rev descending order
3062 return sorted(heads, key=self.changelog.rev, reverse=True)
3062 return sorted(heads, key=self.changelog.rev, reverse=True)
3063
3063
3064 def branchheads(self, branch=None, start=None, closed=False):
3064 def branchheads(self, branch=None, start=None, closed=False):
3065 '''return a (possibly filtered) list of heads for the given branch
3065 '''return a (possibly filtered) list of heads for the given branch
3066
3066
3067 Heads are returned in topological order, from newest to oldest.
3067 Heads are returned in topological order, from newest to oldest.
3068 If branch is None, use the dirstate branch.
3068 If branch is None, use the dirstate branch.
3069 If start is not None, return only heads reachable from start.
3069 If start is not None, return only heads reachable from start.
3070 If closed is True, return heads that are marked as closed as well.
3070 If closed is True, return heads that are marked as closed as well.
3071 '''
3071 '''
3072 if branch is None:
3072 if branch is None:
3073 branch = self[None].branch()
3073 branch = self[None].branch()
3074 branches = self.branchmap()
3074 branches = self.branchmap()
3075 if not branches.hasbranch(branch):
3075 if not branches.hasbranch(branch):
3076 return []
3076 return []
3077 # the cache returns heads ordered lowest to highest
3077 # the cache returns heads ordered lowest to highest
3078 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3078 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3079 if start is not None:
3079 if start is not None:
3080 # filter out the heads that cannot be reached from startrev
3080 # filter out the heads that cannot be reached from startrev
3081 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3081 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3082 bheads = [h for h in bheads if h in fbheads]
3082 bheads = [h for h in bheads if h in fbheads]
3083 return bheads
3083 return bheads
3084
3084
3085 def branches(self, nodes):
3085 def branches(self, nodes):
3086 if not nodes:
3086 if not nodes:
3087 nodes = [self.changelog.tip()]
3087 nodes = [self.changelog.tip()]
3088 b = []
3088 b = []
3089 for n in nodes:
3089 for n in nodes:
3090 t = n
3090 t = n
3091 while True:
3091 while True:
3092 p = self.changelog.parents(n)
3092 p = self.changelog.parents(n)
3093 if p[1] != nullid or p[0] == nullid:
3093 if p[1] != nullid or p[0] == nullid:
3094 b.append((t, n, p[0], p[1]))
3094 b.append((t, n, p[0], p[1]))
3095 break
3095 break
3096 n = p[0]
3096 n = p[0]
3097 return b
3097 return b
3098
3098
3099 def between(self, pairs):
3099 def between(self, pairs):
3100 r = []
3100 r = []
3101
3101
3102 for top, bottom in pairs:
3102 for top, bottom in pairs:
3103 n, l, i = top, [], 0
3103 n, l, i = top, [], 0
3104 f = 1
3104 f = 1
3105
3105
3106 while n != bottom and n != nullid:
3106 while n != bottom and n != nullid:
3107 p = self.changelog.parents(n)[0]
3107 p = self.changelog.parents(n)[0]
3108 if i == f:
3108 if i == f:
3109 l.append(n)
3109 l.append(n)
3110 f = f * 2
3110 f = f * 2
3111 n = p
3111 n = p
3112 i += 1
3112 i += 1
3113
3113
3114 r.append(l)
3114 r.append(l)
3115
3115
3116 return r
3116 return r
3117
3117
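between() samples the first-parent chain at exponentially growing offsets (1, 2, 4, ...) while walking from top toward bottom. A self-contained sketch of one pair, using integers as nodes and subtraction as a stand-in parent function:

def _between_one(parent, top, bottom):
    n, l, i, f = top, [], 0, 1
    while n != bottom and n is not None:
        p = parent(n)
        if i == f:
            l.append(n)
            f = f * 2
        n = p
        i += 1
    return l

parent = lambda x: x - 1 if x > 0 else None
assert _between_one(parent, 10, 0) == [9, 8, 6, 2]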
3118 def checkpush(self, pushop):
3118 def checkpush(self, pushop):
3119 """Extensions can override this function if additional checks have
3119 """Extensions can override this function if additional checks have
3120 to be performed before pushing, or call it if they override push
3120 to be performed before pushing, or call it if they override push
3121 command.
3121 command.
3122 """
3122 """
3123
3123
3124 @unfilteredpropertycache
3124 @unfilteredpropertycache
3125 def prepushoutgoinghooks(self):
3125 def prepushoutgoinghooks(self):
3126 Return util.hooks consisting of a pushop with repo, remote, outgoing
3126 Return util.hooks consisting of a pushop with repo, remote, outgoing
3127 methods, which are called before pushing changesets.
3127 methods, which are called before pushing changesets.
3128 """
3128 """
3129 return util.hooks()
3129 return util.hooks()
3130
3130
3131 def pushkey(self, namespace, key, old, new):
3131 def pushkey(self, namespace, key, old, new):
3132 try:
3132 try:
3133 tr = self.currenttransaction()
3133 tr = self.currenttransaction()
3134 hookargs = {}
3134 hookargs = {}
3135 if tr is not None:
3135 if tr is not None:
3136 hookargs.update(tr.hookargs)
3136 hookargs.update(tr.hookargs)
3137 hookargs = pycompat.strkwargs(hookargs)
3137 hookargs = pycompat.strkwargs(hookargs)
3138 hookargs['namespace'] = namespace
3138 hookargs['namespace'] = namespace
3139 hookargs['key'] = key
3139 hookargs['key'] = key
3140 hookargs['old'] = old
3140 hookargs['old'] = old
3141 hookargs['new'] = new
3141 hookargs['new'] = new
3142 self.hook(b'prepushkey', throw=True, **hookargs)
3142 self.hook(b'prepushkey', throw=True, **hookargs)
3143 except error.HookAbort as exc:
3143 except error.HookAbort as exc:
3144 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3144 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3145 if exc.hint:
3145 if exc.hint:
3146 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3146 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3147 return False
3147 return False
3148 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3148 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3149 ret = pushkey.push(self, namespace, key, old, new)
3149 ret = pushkey.push(self, namespace, key, old, new)
3150
3150
3151 def runhook(unused_success):
3151 def runhook(unused_success):
3152 self.hook(
3152 self.hook(
3153 b'pushkey',
3153 b'pushkey',
3154 namespace=namespace,
3154 namespace=namespace,
3155 key=key,
3155 key=key,
3156 old=old,
3156 old=old,
3157 new=new,
3157 new=new,
3158 ret=ret,
3158 ret=ret,
3159 )
3159 )
3160
3160
3161 self._afterlock(runhook)
3161 self._afterlock(runhook)
3162 return ret
3162 return ret
3163
3163
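A hedged sketch of how pushkey() assembles the prepushkey hook arguments: the transaction's hook args (if any) form the base and the pushkey parameters are layered on top. Plain str keys stand in for the pycompat.strkwargs conversion:

def _prepushkey_args(tr_hookargs, namespace, key, old, new):
    hookargs = dict(tr_hookargs or {})
    hookargs.update(namespace=namespace, key=key, old=old, new=new)
    return hookargs

args = _prepushkey_args({'txnid': 'TXN:x'}, 'bookmarks', 'foo', '', 'abc1')
assert args['txnid'] == 'TXN:x' and args['key'] == 'foo'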
3164 def listkeys(self, namespace):
3164 def listkeys(self, namespace):
3165 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3165 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3166 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3166 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3167 values = pushkey.list(self, namespace)
3167 values = pushkey.list(self, namespace)
3168 self.hook(b'listkeys', namespace=namespace, values=values)
3168 self.hook(b'listkeys', namespace=namespace, values=values)
3169 return values
3169 return values
3170
3170
3171 def debugwireargs(self, one, two, three=None, four=None, five=None):
3171 def debugwireargs(self, one, two, three=None, four=None, five=None):
3172 '''used to test argument passing over the wire'''
3172 '''used to test argument passing over the wire'''
3173 return b"%s %s %s %s %s" % (
3173 return b"%s %s %s %s %s" % (
3174 one,
3174 one,
3175 two,
3175 two,
3176 pycompat.bytestr(three),
3176 pycompat.bytestr(three),
3177 pycompat.bytestr(four),
3177 pycompat.bytestr(four),
3178 pycompat.bytestr(five),
3178 pycompat.bytestr(five),
3179 )
3179 )
3180
3180
3181 def savecommitmessage(self, text):
3181 def savecommitmessage(self, text):
3182 fp = self.vfs(b'last-message.txt', b'wb')
3182 fp = self.vfs(b'last-message.txt', b'wb')
3183 try:
3183 try:
3184 fp.write(text)
3184 fp.write(text)
3185 finally:
3185 finally:
3186 fp.close()
3186 fp.close()
3187 return self.pathto(fp.name[len(self.root) + 1 :])
3187 return self.pathto(fp.name[len(self.root) + 1 :])
3188
3188
3189
3189
3190 # used to avoid circular references so destructors work
3190 # used to avoid circular references so destructors work
3191 def aftertrans(files):
3191 def aftertrans(files):
3192 renamefiles = [tuple(t) for t in files]
3192 renamefiles = [tuple(t) for t in files]
3193
3193
3194 def a():
3194 def a():
3195 for vfs, src, dest in renamefiles:
3195 for vfs, src, dest in renamefiles:
3196 # if src and dest refer to the same file, vfs.rename is a no-op,
3196 # if src and dest refer to the same file, vfs.rename is a no-op,
3197 # leaving both src and dest on disk. delete dest to make sure
3197 # leaving both src and dest on disk. delete dest to make sure
3198 # the rename couldn't be such a no-op.
3198 # the rename couldn't be such a no-op.
3199 vfs.tryunlink(dest)
3199 vfs.tryunlink(dest)
3200 try:
3200 try:
3201 vfs.rename(src, dest)
3201 vfs.rename(src, dest)
3202 except OSError: # journal file does not yet exist
3202 except OSError: # journal file does not yet exist
3203 pass
3203 pass
3204
3204
3205 return a
3205 return a
3206
3206
3207
3207
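Illustrative only: the unlink-then-rename dance in aftertrans() guards against src and dest resolving to the same file, where a bare rename would be a no-op and leave a stale journal file behind. A stand-in sketch using os directly:

import os

def _force_rename(src, dest):
    try:
        os.unlink(dest)  # ensure the rename below cannot be a no-op
    except OSError:
        pass
    try:
        os.rename(src, dest)
    except OSError:  # journal file does not yet exist
        pass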
3208 def undoname(fn):
3208 def undoname(fn):
3209 base, name = os.path.split(fn)
3209 base, name = os.path.split(fn)
3210 assert name.startswith(b'journal')
3210 assert name.startswith(b'journal')
3211 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3211 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3212
3212
3213
3213
3214 def instance(ui, path, create, intents=None, createopts=None):
3214 def instance(ui, path, create, intents=None, createopts=None):
3215 localpath = util.urllocalpath(path)
3215 localpath = util.urllocalpath(path)
3216 if create:
3216 if create:
3217 createrepository(ui, localpath, createopts=createopts)
3217 createrepository(ui, localpath, createopts=createopts)
3218
3218
3219 return makelocalrepository(ui, localpath, intents=intents)
3219 return makelocalrepository(ui, localpath, intents=intents)
3220
3220
3221
3221
3222 def islocal(path):
3222 def islocal(path):
3223 return True
3223 return True
3224
3224
3225
3225
3226 def defaultcreateopts(ui, createopts=None):
3226 def defaultcreateopts(ui, createopts=None):
3227 """Populate the default creation options for a repository.
3227 """Populate the default creation options for a repository.
3228
3228
3229 A dictionary of explicitly requested creation options can be passed
3229 A dictionary of explicitly requested creation options can be passed
3230 in. Missing keys will be populated.
3230 in. Missing keys will be populated.
3231 """
3231 """
3232 createopts = dict(createopts or {})
3232 createopts = dict(createopts or {})
3233
3233
3234 if b'backend' not in createopts:
3234 if b'backend' not in createopts:
3235 # experimental config: storage.new-repo-backend
3235 # experimental config: storage.new-repo-backend
3236 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3236 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3237
3237
3238 return createopts
3238 return createopts
3239
3239
3240
3240
3241 def newreporequirements(ui, createopts):
3241 def newreporequirements(ui, createopts):
3242 """Determine the set of requirements for a new local repository.
3242 """Determine the set of requirements for a new local repository.
3243
3243
3244 Extensions can wrap this function to specify custom requirements for
3244 Extensions can wrap this function to specify custom requirements for
3245 new repositories.
3245 new repositories.
3246 """
3246 """
3247 # If the repo is being created from a shared repository, we copy
3247 # If the repo is being created from a shared repository, we copy
3248 # its requirements.
3248 # its requirements.
3249 if b'sharedrepo' in createopts:
3249 if b'sharedrepo' in createopts:
3250 requirements = set(createopts[b'sharedrepo'].requirements)
3250 requirements = set(createopts[b'sharedrepo'].requirements)
3251 if createopts.get(b'sharedrelative'):
3251 if createopts.get(b'sharedrelative'):
3252 requirements.add(b'relshared')
3252 requirements.add(b'relshared')
3253 else:
3253 else:
3254 requirements.add(b'shared')
3254 requirements.add(b'shared')
3255
3255
3256 return requirements
3256 return requirements
3257
3257
3258 if b'backend' not in createopts:
3258 if b'backend' not in createopts:
3259 raise error.ProgrammingError(
3259 raise error.ProgrammingError(
3260 b'backend key not present in createopts; '
3260 b'backend key not present in createopts; '
3261 b'was defaultcreateopts() called?'
3261 b'was defaultcreateopts() called?'
3262 )
3262 )
3263
3263
3264 if createopts[b'backend'] != b'revlogv1':
3264 if createopts[b'backend'] != b'revlogv1':
3265 raise error.Abort(
3265 raise error.Abort(
3266 _(
3266 _(
3267 b'unable to determine repository requirements for '
3267 b'unable to determine repository requirements for '
3268 b'storage backend: %s'
3268 b'storage backend: %s'
3269 )
3269 )
3270 % createopts[b'backend']
3270 % createopts[b'backend']
3271 )
3271 )
3272
3272
3273 requirements = {b'revlogv1'}
3273 requirements = {b'revlogv1'}
3274 if ui.configbool(b'format', b'usestore'):
3274 if ui.configbool(b'format', b'usestore'):
3275 requirements.add(b'store')
3275 requirements.add(b'store')
3276 if ui.configbool(b'format', b'usefncache'):
3276 if ui.configbool(b'format', b'usefncache'):
3277 requirements.add(b'fncache')
3277 requirements.add(b'fncache')
3278 if ui.configbool(b'format', b'dotencode'):
3278 if ui.configbool(b'format', b'dotencode'):
3279 requirements.add(b'dotencode')
3279 requirements.add(b'dotencode')
3280
3280
3281 compengines = ui.configlist(b'format', b'revlog-compression')
3281 compengines = ui.configlist(b'format', b'revlog-compression')
3282 for compengine in compengines:
3282 for compengine in compengines:
3283 if compengine in util.compengines:
3283 if compengine in util.compengines:
3284 break
3284 break
3285 else:
3285 else:
3286 raise error.Abort(
3286 raise error.Abort(
3287 _(
3287 _(
3288 b'compression engines %s defined by '
3288 b'compression engines %s defined by '
3289 b'format.revlog-compression not available'
3289 b'format.revlog-compression not available'
3290 )
3290 )
3291 % b', '.join(b'"%s"' % e for e in compengines),
3291 % b', '.join(b'"%s"' % e for e in compengines),
3292 hint=_(
3292 hint=_(
3293 b'run "hg debuginstall" to list available '
3293 b'run "hg debuginstall" to list available '
3294 b'compression engines'
3294 b'compression engines'
3295 ),
3295 ),
3296 )
3296 )
3297
3297
3298 # zlib is the historical default and doesn't need an explicit requirement.
3298 # zlib is the historical default and doesn't need an explicit requirement.
3299 if compengine == b'zstd':
3299 if compengine == b'zstd':
3300 requirements.add(b'revlog-compression-zstd')
3300 requirements.add(b'revlog-compression-zstd')
3301 elif compengine != b'zlib':
3301 elif compengine != b'zlib':
3302 requirements.add(b'exp-compression-%s' % compengine)
3302 requirements.add(b'exp-compression-%s' % compengine)
3303
3303
3304 if scmutil.gdinitconfig(ui):
3304 if scmutil.gdinitconfig(ui):
3305 requirements.add(b'generaldelta')
3305 requirements.add(b'generaldelta')
3306 if ui.configbool(b'format', b'sparse-revlog'):
3306 if ui.configbool(b'format', b'sparse-revlog'):
3307 requirements.add(SPARSEREVLOG_REQUIREMENT)
3307 requirements.add(SPARSEREVLOG_REQUIREMENT)
3308
3308
3309 # experimental config: format.exp-use-side-data
3309 # experimental config: format.exp-use-side-data
3310 if ui.configbool(b'format', b'exp-use-side-data'):
3310 if ui.configbool(b'format', b'exp-use-side-data'):
3311 requirements.add(SIDEDATA_REQUIREMENT)
3311 requirements.add(SIDEDATA_REQUIREMENT)
3312 # experimental config: format.exp-use-copies-side-data-changeset
3312 # experimental config: format.exp-use-copies-side-data-changeset
3313 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3313 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3314 requirements.add(SIDEDATA_REQUIREMENT)
3314 requirements.add(SIDEDATA_REQUIREMENT)
3315 requirements.add(COPIESSDC_REQUIREMENT)
3315 requirements.add(COPIESSDC_REQUIREMENT)
3316 if ui.configbool(b'experimental', b'treemanifest'):
3316 if ui.configbool(b'experimental', b'treemanifest'):
3317 requirements.add(b'treemanifest')
3317 requirements.add(b'treemanifest')
3318
3318
3319 revlogv2 = ui.config(b'experimental', b'revlogv2')
3319 revlogv2 = ui.config(b'experimental', b'revlogv2')
3320 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3320 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3321 requirements.remove(b'revlogv1')
3321 requirements.remove(b'revlogv1')
3322 # generaldelta is implied by revlogv2.
3322 # generaldelta is implied by revlogv2.
3323 requirements.discard(b'generaldelta')
3323 requirements.discard(b'generaldelta')
3324 requirements.add(REVLOGV2_REQUIREMENT)
3324 requirements.add(REVLOGV2_REQUIREMENT)
3325 # experimental config: format.internal-phase
3325 # experimental config: format.internal-phase
3326 if ui.configbool(b'format', b'internal-phase'):
3326 if ui.configbool(b'format', b'internal-phase'):
3327 requirements.add(b'internal-phase')
3327 requirements.add(repository.INTERNAL_PHASE_REQUIREMENT)
3328
3328
3329 if createopts.get(b'narrowfiles'):
3329 if createopts.get(b'narrowfiles'):
3330 requirements.add(repository.NARROW_REQUIREMENT)
3330 requirements.add(repository.NARROW_REQUIREMENT)
3331
3331
3332 if createopts.get(b'lfs'):
3332 if createopts.get(b'lfs'):
3333 requirements.add(b'lfs')
3333 requirements.add(b'lfs')
3334
3334
3335 if ui.configbool(b'format', b'bookmarks-in-store'):
3335 if ui.configbool(b'format', b'bookmarks-in-store'):
3336 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3336 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3337
3337
3338 if ui.configbool(b'format', b'use-persistent-nodemap'):
3338 if ui.configbool(b'format', b'use-persistent-nodemap'):
3339 requirements.add(NODEMAP_REQUIREMENT)
3339 requirements.add(NODEMAP_REQUIREMENT)
3340
3340
3341 return requirements
3341 return requirements
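For orientation, a hedged sketch of what this yields under stock settings (the exact set depends on the Mercurial version and local [format] configuration, so treat the values as illustrative):

    reqs = newreporequirements(ui, defaultcreateopts(ui))
    # typically along the lines of:
    # {b'revlogv1', b'store', b'fncache', b'dotencode',
    #  b'generaldelta', b'sparserevlog'}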
3342
3342
3343
3343
3344 def checkrequirementscompat(ui, requirements):
3344 def checkrequirementscompat(ui, requirements):
3345 """ Checks compatibility of repository requirements enabled and disabled.
3345 """ Checks compatibility of repository requirements enabled and disabled.
3346
3346
3347 Returns a set of requirements which needs to be dropped because dependend
3347 Returns a set of requirements which needs to be dropped because dependend
3348 requirements are not enabled. Also warns users about it """
3348 requirements are not enabled. Also warns users about it """
3349
3349
3350 dropped = set()
3350 dropped = set()
3351
3351
3352 if b'store' not in requirements:
3352 if b'store' not in requirements:
3353 if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3353 if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3354 ui.warn(
3354 ui.warn(
3355 _(
3355 _(
3356 b'ignoring enabled \'format.bookmarks-in-store\' config '
3356 b'ignoring enabled \'format.bookmarks-in-store\' config '
3357 b'because it is incompatible with disabled '
3357 b'because it is incompatible with disabled '
3358 b'\'format.usestore\' config\n'
3358 b'\'format.usestore\' config\n'
3359 )
3359 )
3360 )
3360 )
3361 dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3361 dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3362
3362
3363 if b'shared' in requirements or b'relshared' in requirements:
3363 if b'shared' in requirements or b'relshared' in requirements:
3364 raise error.Abort(
3364 raise error.Abort(
3365 _(
3365 _(
3366 b"cannot create shared repository as source was created"
3366 b"cannot create shared repository as source was created"
3367 b" with 'format.usestore' config disabled"
3367 b" with 'format.usestore' config disabled"
3368 )
3368 )
3369 )
3369 )
3370
3370
3371 return dropped
3371 return dropped
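A hedged illustration of the dropping behavior (b'bookmarksinstore' is assumed to be the on-disk spelling of the bookmarks-in-store requirement): without a store, the bookmarks requirement is warned about and dropped, while a store-less shared repo aborts outright.

    reqs = {b'revlogv1', b'bookmarksinstore'}    # note: no b'store'
    dropped = checkrequirementscompat(ui, reqs)  # warns, returns {b'bookmarksinstore'}
    reqs -= dropped                              # what createrepository() does next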
3372
3372
3373
3373
3374 def filterknowncreateopts(ui, createopts):
3374 def filterknowncreateopts(ui, createopts):
3375 """Filters a dict of repo creation options against options that are known.
3375 """Filters a dict of repo creation options against options that are known.
3376
3376
3377 Receives a dict of repo creation options and returns a dict of those
3377 Receives a dict of repo creation options and returns a dict of those
3378 options that we don't know how to handle.
3378 options that we don't know how to handle.
3379
3379
3380 This function is called as part of repository creation. If the
3380 This function is called as part of repository creation. If the
3381 returned dict contains any items, repository creation will not
3381 returned dict contains any items, repository creation will not
3382 be allowed, as it means there was a request to create a repository
3382 be allowed, as it means there was a request to create a repository
3383 with options not recognized by loaded code.
3383 with options not recognized by loaded code.
3384
3384
3385 Extensions can wrap this function to filter out creation options
3385 Extensions can wrap this function to filter out creation options
3386 they know how to handle.
3386 they know how to handle.
3387 """
3387 """
3388 known = {
3388 known = {
3389 b'backend',
3389 b'backend',
3390 b'lfs',
3390 b'lfs',
3391 b'narrowfiles',
3391 b'narrowfiles',
3392 b'sharedrepo',
3392 b'sharedrepo',
3393 b'sharedrelative',
3393 b'sharedrelative',
3394 b'shareditems',
3394 b'shareditems',
3395 b'shallowfilestore',
3395 b'shallowfilestore',
3396 }
3396 }
3397
3397
3398 return {k: v for k, v in createopts.items() if k not in known}
3398 return {k: v for k, v in createopts.items() if k not in known}
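A quick sketch of the contract: known keys are swallowed, and anything left over will abort repository creation upstream (b'frobnicate' is a made-up option for illustration).

    leftover = filterknowncreateopts(
        ui, {b'backend': b'revlogv1', b'frobnicate': True}
    )
    # leftover == {b'frobnicate': True}, so createrepository() would abort
    # with 'unable to create repository because of unknown creation option'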
3399
3399
3400
3400
3401 def createrepository(ui, path, createopts=None):
3401 def createrepository(ui, path, createopts=None):
3402 """Create a new repository in a vfs.
3402 """Create a new repository in a vfs.
3403
3403
3404 ``path`` path to the new repo's working directory.
3404 ``path`` path to the new repo's working directory.
3405 ``createopts`` options for the new repository.
3405 ``createopts`` options for the new repository.
3406
3406
3407 The following keys for ``createopts`` are recognized:
3407 The following keys for ``createopts`` are recognized:
3408
3408
3409 backend
3409 backend
3410 The storage backend to use.
3410 The storage backend to use.
3411 lfs
3411 lfs
3412 Repository will be created with ``lfs`` requirement. The lfs extension
3412 Repository will be created with ``lfs`` requirement. The lfs extension
3413 will automatically be loaded when the repository is accessed.
3413 will automatically be loaded when the repository is accessed.
3414 narrowfiles
3414 narrowfiles
3415 Set up repository to support narrow file storage.
3415 Set up repository to support narrow file storage.
3416 sharedrepo
3416 sharedrepo
3417 Repository object from which storage should be shared.
3417 Repository object from which storage should be shared.
3418 sharedrelative
3418 sharedrelative
3419 Boolean indicating if the path to the shared repo should be
3419 Boolean indicating if the path to the shared repo should be
3420 stored as relative. By default, the pointer to the "parent" repo
3420 stored as relative. By default, the pointer to the "parent" repo
3421 is stored as an absolute path.
3421 is stored as an absolute path.
3422 shareditems
3422 shareditems
3423 Set of items to share to the new repository (in addition to storage).
3423 Set of items to share to the new repository (in addition to storage).
3424 shallowfilestore
3424 shallowfilestore
3425 Indicates that storage for files should be shallow (not all ancestor
3425 Indicates that storage for files should be shallow (not all ancestor
3426 revisions are known).
3426 revisions are known).
3427 """
3427 """
3428 createopts = defaultcreateopts(ui, createopts=createopts)
3428 createopts = defaultcreateopts(ui, createopts=createopts)
3429
3429
3430 unknownopts = filterknowncreateopts(ui, createopts)
3430 unknownopts = filterknowncreateopts(ui, createopts)
3431
3431
3432 if not isinstance(unknownopts, dict):
3432 if not isinstance(unknownopts, dict):
3433 raise error.ProgrammingError(
3433 raise error.ProgrammingError(
3434 b'filterknowncreateopts() did not return a dict'
3434 b'filterknowncreateopts() did not return a dict'
3435 )
3435 )
3436
3436
3437 if unknownopts:
3437 if unknownopts:
3438 raise error.Abort(
3438 raise error.Abort(
3439 _(
3439 _(
3440 b'unable to create repository because of unknown '
3440 b'unable to create repository because of unknown '
3441 b'creation option: %s'
3441 b'creation option: %s'
3442 )
3442 )
3443 % b', '.join(sorted(unknownopts)),
3443 % b', '.join(sorted(unknownopts)),
3444 hint=_(b'is a required extension not loaded?'),
3444 hint=_(b'is a required extension not loaded?'),
3445 )
3445 )
3446
3446
3447 requirements = newreporequirements(ui, createopts=createopts)
3447 requirements = newreporequirements(ui, createopts=createopts)
3448 requirements -= checkrequirementscompat(ui, requirements)
3448 requirements -= checkrequirementscompat(ui, requirements)
3449
3449
3450 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3450 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3451
3451
3452 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3452 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3453 if hgvfs.exists():
3453 if hgvfs.exists():
3454 raise error.RepoError(_(b'repository %s already exists') % path)
3454 raise error.RepoError(_(b'repository %s already exists') % path)
3455
3455
3456 if b'sharedrepo' in createopts:
3456 if b'sharedrepo' in createopts:
3457 sharedpath = createopts[b'sharedrepo'].sharedpath
3457 sharedpath = createopts[b'sharedrepo'].sharedpath
3458
3458
3459 if createopts.get(b'sharedrelative'):
3459 if createopts.get(b'sharedrelative'):
3460 try:
3460 try:
3461 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3461 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3462 except (IOError, ValueError) as e:
3462 except (IOError, ValueError) as e:
3463 # ValueError is raised on Windows if the drive letters differ
3463 # ValueError is raised on Windows if the drive letters differ
3464 # on each path.
3464 # on each path.
3465 raise error.Abort(
3465 raise error.Abort(
3466 _(b'cannot calculate relative path'),
3466 _(b'cannot calculate relative path'),
3467 hint=stringutil.forcebytestr(e),
3467 hint=stringutil.forcebytestr(e),
3468 )
3468 )
3469
3469
3470 if not wdirvfs.exists():
3470 if not wdirvfs.exists():
3471 wdirvfs.makedirs()
3471 wdirvfs.makedirs()
3472
3472
3473 hgvfs.makedir(notindexed=True)
3473 hgvfs.makedir(notindexed=True)
3474 if b'sharedrepo' not in createopts:
3474 if b'sharedrepo' not in createopts:
3475 hgvfs.mkdir(b'cache')
3475 hgvfs.mkdir(b'cache')
3476 hgvfs.mkdir(b'wcache')
3476 hgvfs.mkdir(b'wcache')
3477
3477
3478 if b'store' in requirements and b'sharedrepo' not in createopts:
3478 if b'store' in requirements and b'sharedrepo' not in createopts:
3479 hgvfs.mkdir(b'store')
3479 hgvfs.mkdir(b'store')
3480
3480
3481 # We create an invalid changelog outside the store so very old
3481 # We create an invalid changelog outside the store so very old
3482 # Mercurial versions (which didn't know about the requirements
3482 # Mercurial versions (which didn't know about the requirements
3483 # file) encounter an error on reading the changelog. This
3483 # file) encounter an error on reading the changelog. This
3484 # effectively locks out old clients and prevents them from
3484 # effectively locks out old clients and prevents them from
3485 # mucking with a repo in an unknown format.
3485 # mucking with a repo in an unknown format.
3486 #
3486 #
3487 # The revlog header has version 2, which won't be recognized by
3487 # The revlog header has version 2, which won't be recognized by
3488 # such old clients.
3488 # such old clients.
3489 hgvfs.append(
3489 hgvfs.append(
3490 b'00changelog.i',
3490 b'00changelog.i',
3491 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3491 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3492 b'layout',
3492 b'layout',
3493 )
3493 )
3494
3494
3495 scmutil.writerequires(hgvfs, requirements)
3495 scmutil.writerequires(hgvfs, requirements)
3496
3496
3497 # Write out file telling readers where to find the shared store.
3497 # Write out file telling readers where to find the shared store.
3498 if b'sharedrepo' in createopts:
3498 if b'sharedrepo' in createopts:
3499 hgvfs.write(b'sharedpath', sharedpath)
3499 hgvfs.write(b'sharedpath', sharedpath)
3500
3500
3501 if createopts.get(b'shareditems'):
3501 if createopts.get(b'shareditems'):
3502 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3502 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3503 hgvfs.write(b'shared', shared)
3503 hgvfs.write(b'shared', shared)
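A hedged end-to-end sketch of driving this API (assumes these helpers live in mercurial.localrepo; the path is illustrative):

    from mercurial import localrepo, ui as uimod

    myui = uimod.ui.load()
    localrepo.createrepository(myui, b'/tmp/newrepo', createopts={b'lfs': True})
    # .hg/requires in the new repo now lists b'lfs' alongside the defaults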
3504
3504
3505
3505
3506 def poisonrepository(repo):
3506 def poisonrepository(repo):
3507 """Poison a repository instance so it can no longer be used."""
3507 """Poison a repository instance so it can no longer be used."""
3508 # Perform any cleanup on the instance.
3508 # Perform any cleanup on the instance.
3509 repo.close()
3509 repo.close()
3510
3510
3511 # Our strategy is to replace the type of the object with one that
3511 # Our strategy is to replace the type of the object with one that
3512 # has all attribute lookups result in error.
3512 # has all attribute lookups result in error.
3513 #
3513 #
3514 # But we have to allow the close() method because some constructors
3514 # But we have to allow the close() method because some constructors
3515 # of repos call close() on repo references.
3515 # of repos call close() on repo references.
3516 class poisonedrepository(object):
3516 class poisonedrepository(object):
3517 def __getattribute__(self, item):
3517 def __getattribute__(self, item):
3518 if item == 'close':
3518 if item == 'close':
3519 return object.__getattribute__(self, item)
3519 return object.__getattribute__(self, item)
3520
3520
3521 raise error.ProgrammingError(
3521 raise error.ProgrammingError(
3522 b'repo instances should not be used after unshare'
3522 b'repo instances should not be used after unshare'
3523 )
3523 )
3524
3524
3525 def close(self):
3525 def close(self):
3526 pass
3526 pass
3527
3527
3528 # We may have a repoview, which intercepts __setattr__. So be sure
3528 # We may have a repoview, which intercepts __setattr__. So be sure
3529 # we operate at the lowest level possible.
3529 # we operate at the lowest level possible.
3530 object.__setattr__(repo, '__class__', poisonedrepository)
3530 object.__setattr__(repo, '__class__', poisonedrepository)
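The __class__ swap is a general Python technique rather than Mercurial API; here is a standalone, runnable sketch with illustrative names:

    class _Poisoned(object):
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)
            raise RuntimeError('object used after poisoning')

        def close(self):
            pass  # deliberately still allowed, mirroring the repo case

    class Victim(object):
        pass

    v = Victim()
    object.__setattr__(v, '__class__', _Poisoned)
    v.close()     # fine
    # v.anything  # any other attribute access now raises RuntimeError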
@@ -1,929 +1,930 b''
1 """ Mercurial phases support code
1 """ Mercurial phases support code
2
2
3 ---
3 ---
4
4
5 Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
5 Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
6 Logilab SA <contact@logilab.fr>
6 Logilab SA <contact@logilab.fr>
7 Augie Fackler <durin42@gmail.com>
7 Augie Fackler <durin42@gmail.com>
8
8
9 This software may be used and distributed according to the terms
9 This software may be used and distributed according to the terms
10 of the GNU General Public License version 2 or any later version.
10 of the GNU General Public License version 2 or any later version.
11
11
12 ---
12 ---
13
13
14 This module implements most phase logic in mercurial.
14 This module implements most phase logic in mercurial.
15
15
16
16
17 Basic Concept
17 Basic Concept
18 =============
18 =============
19
19
20 A 'changeset phase' is an indicator that tells us how a changeset is
20 A 'changeset phase' is an indicator that tells us how a changeset is
21 manipulated and communicated. The details of each phase are described
21 manipulated and communicated. The details of each phase are described
22 below; here we describe the properties they have in common.
22 below; here we describe the properties they have in common.
23
23
24 Like bookmarks, phases are not stored in history and thus are not
24 Like bookmarks, phases are not stored in history and thus are not
25 permanent and leave no audit trail.
25 permanent and leave no audit trail.
26
26
27 First, no changeset can be in two phases at once. Phases are ordered,
27 First, no changeset can be in two phases at once. Phases are ordered,
28 so they can be considered from lowest to highest. The default, lowest
28 so they can be considered from lowest to highest. The default, lowest
29 phase is 'public' - this is the normal phase of existing changesets. A
29 phase is 'public' - this is the normal phase of existing changesets. A
30 child changeset cannot be in a lower phase than its parents.
30 child changeset cannot be in a lower phase than its parents.
31
31
32 These phases share a hierarchy of traits:
32 These phases share a hierarchy of traits:
33
33
34 immutable shared
34 immutable shared
35 public: X X
35 public: X X
36 draft: X
36 draft: X
37 secret:
37 secret:
38
38
39 Local commits are draft by default.
39 Local commits are draft by default.
40
40
41 Phase Movement and Exchange
41 Phase Movement and Exchange
42 ===========================
42 ===========================
43
43
44 Phase data is exchanged by pushkey on pull and push. Some servers have
44 Phase data is exchanged by pushkey on pull and push. Some servers have
45 a publish option set; we call such a server a "publishing server".
45 a publish option set; we call such a server a "publishing server".
46 Pushing a draft changeset to a publishing server changes the phase to
46 Pushing a draft changeset to a publishing server changes the phase to
47 public.
47 public.
48
48
49 A small list of facts/rules defines the exchange of phases:
49 A small list of facts/rules defines the exchange of phases:
50
50
51 * old client never changes server states
51 * old client never changes server states
52 * pull never changes server states
52 * pull never changes server states
53 * publish and old server changesets are seen as public by client
53 * publish and old server changesets are seen as public by client
54 * any secret changeset seen in another repository is lowered to at
54 * any secret changeset seen in another repository is lowered to at
55 least draft
55 least draft
56
56
57 Here is the final table summing up the 49 possible use cases of phase
57 Here is the final table summing up the 49 possible use cases of phase
58 exchange:
58 exchange:
59
59
60 server
60 server
61 old publish non-publish
61 old publish non-publish
62 N X N D P N D P
62 N X N D P N D P
63 old client
63 old client
64 pull
64 pull
65 N - X/X - X/D X/P - X/D X/P
65 N - X/X - X/D X/P - X/D X/P
66 X - X/X - X/D X/P - X/D X/P
66 X - X/X - X/D X/P - X/D X/P
67 push
67 push
68 X X/X X/X X/P X/P X/P X/D X/D X/P
68 X X/X X/X X/P X/P X/P X/D X/D X/P
69 new client
69 new client
70 pull
70 pull
71 N - P/X - P/D P/P - D/D P/P
71 N - P/X - P/D P/P - D/D P/P
72 D - P/X - P/D P/P - D/D P/P
72 D - P/X - P/D P/P - D/D P/P
73 P - P/X - P/D P/P - P/D P/P
73 P - P/X - P/D P/P - P/D P/P
74 push
74 push
75 D P/X P/X P/P P/P P/P D/D D/D P/P
75 D P/X P/X P/P P/P P/P D/D D/D P/P
76 P P/X P/X P/P P/P P/P P/P P/P P/P
76 P P/X P/X P/P P/P P/P P/P P/P P/P
77
77
78 Legend:
78 Legend:
79
79
80 A/B = final state on client / state on server
80 A/B = final state on client / state on server
81
81
82 * N = new/not present,
82 * N = new/not present,
83 * P = public,
83 * P = public,
84 * D = draft,
84 * D = draft,
85 * X = not tracked (i.e., the old client or server has no internal
85 * X = not tracked (i.e., the old client or server has no internal
86 way of recording the phase.)
86 way of recording the phase.)
87
87
88 passive = only pushes
88 passive = only pushes
89
89
90
90
91 A cell here can be read like this:
91 A cell here can be read like this:
92
92
93 "When a new client pushes a draft changeset (D) to a publishing
93 "When a new client pushes a draft changeset (D) to a publishing
94 server where it's not present (N), it's marked public on both
94 server where it's not present (N), it's marked public on both
95 sides (P/P)."
95 sides (P/P)."
96
96
97 Note: an old client behaves as a publishing server with draft-only content
97 Note: an old client behaves as a publishing server with draft-only content
98 - other people see it as public
98 - other people see it as public
99 - content is pushed as draft
99 - content is pushed as draft
100
100
101 """
101 """
102
102
103 from __future__ import absolute_import
103 from __future__ import absolute_import
104
104
105 import errno
105 import errno
106 import struct
106 import struct
107
107
108 from .i18n import _
108 from .i18n import _
109 from .node import (
109 from .node import (
110 bin,
110 bin,
111 hex,
111 hex,
112 nullid,
112 nullid,
113 nullrev,
113 nullrev,
114 short,
114 short,
115 wdirrev,
115 wdirrev,
116 )
116 )
117 from .pycompat import (
117 from .pycompat import (
118 getattr,
118 getattr,
119 setattr,
119 setattr,
120 )
120 )
121 from . import (
121 from . import (
122 error,
122 error,
123 pycompat,
123 pycompat,
124 smartset,
124 smartset,
125 txnutil,
125 txnutil,
126 util,
126 util,
127 )
127 )
128 from .interfaces import repository
128
129
129 _fphasesentry = struct.Struct(b'>i20s')
130 _fphasesentry = struct.Struct(b'>i20s')
130
131
131 # record phase index
132 # record phase index
132 public, draft, secret = range(3)
133 public, draft, secret = range(3)
133 archived = 32 # non-continuous for compatibility
134 archived = 32 # non-continuous for compatibility
134 internal = 96 # non-continuous for compatibility
135 internal = 96 # non-continuous for compatibility
135 allphases = (public, draft, secret, archived, internal)
136 allphases = (public, draft, secret, archived, internal)
136 trackedphases = (draft, secret, archived, internal)
137 trackedphases = (draft, secret, archived, internal)
137 # record phase names
138 # record phase names
138 cmdphasenames = [b'public', b'draft', b'secret'] # known to `hg phase` command
139 cmdphasenames = [b'public', b'draft', b'secret'] # known to `hg phase` command
139 phasenames = dict(enumerate(cmdphasenames))
140 phasenames = dict(enumerate(cmdphasenames))
140 phasenames[archived] = b'archived'
141 phasenames[archived] = b'archived'
141 phasenames[internal] = b'internal'
142 phasenames[internal] = b'internal'
142 # map phase name to phase number
143 # map phase name to phase number
143 phasenumber = {name: phase for phase, name in phasenames.items()}
144 phasenumber = {name: phase for phase, name in phasenames.items()}
144 # like phasenumber, but also include maps for the numeric and binary
145 # like phasenumber, but also include maps for the numeric and binary
145 # phase number to the phase number
146 # phase number to the phase number
146 phasenumber2 = phasenumber.copy()
147 phasenumber2 = phasenumber.copy()
147 phasenumber2.update({phase: phase for phase in phasenames})
148 phasenumber2.update({phase: phase for phase in phasenames})
148 phasenumber2.update({b'%i' % phase: phase for phase in phasenames})
149 phasenumber2.update({b'%i' % phase: phase for phase in phasenames})
149 # record phase property
150 # record phase property
150 mutablephases = (draft, secret, archived, internal)
151 mutablephases = (draft, secret, archived, internal)
151 remotehiddenphases = (secret, archived, internal)
152 remotehiddenphases = (secret, archived, internal)
152 localhiddenphases = (internal, archived)
153 localhiddenphases = (internal, archived)
153
154
154
155
155 def supportinternal(repo):
156 def supportinternal(repo):
156 """True if the internal phase can be used on a repository"""
157 """True if the internal phase can be used on a repository"""
157 return b'internal-phase' in repo.requirements
158 return repository.INTERNAL_PHASE_REQUIREMENT in repo.requirements
158
159
159
160
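Since the commit's point is routing this check through the shared constant, a brief hedged sketch of how a caller gates on it (the error text is illustrative; compare _retractboundary below):

    if not supportinternal(repo):
        raise error.ProgrammingError(
            b'this repository does not support the internal phase'
        )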
160 def _readroots(repo, phasedefaults=None):
161 def _readroots(repo, phasedefaults=None):
161 """Read phase roots from disk
162 """Read phase roots from disk
162
163
163 phasedefaults is a list of fn(repo, roots) callable, which are
164 phasedefaults is a list of fn(repo, roots) callable, which are
164 executed if the phase roots file does not exist. When phases are
165 executed if the phase roots file does not exist. When phases are
165 being initialized on an existing repository, this could be used to
166 being initialized on an existing repository, this could be used to
166 set selected changesets' phase to something other than public.
167 set selected changesets' phase to something other than public.
167
168
168 Return (roots, dirty) where dirty is true if roots differ from
169 Return (roots, dirty) where dirty is true if roots differ from
169 what is being stored.
170 what is being stored.
170 """
171 """
171 repo = repo.unfiltered()
172 repo = repo.unfiltered()
172 dirty = False
173 dirty = False
173 roots = {i: set() for i in allphases}
174 roots = {i: set() for i in allphases}
174 try:
175 try:
175 f, pending = txnutil.trypending(repo.root, repo.svfs, b'phaseroots')
176 f, pending = txnutil.trypending(repo.root, repo.svfs, b'phaseroots')
176 try:
177 try:
177 for line in f:
178 for line in f:
178 phase, nh = line.split()
179 phase, nh = line.split()
179 roots[int(phase)].add(bin(nh))
180 roots[int(phase)].add(bin(nh))
180 finally:
181 finally:
181 f.close()
182 f.close()
182 except IOError as inst:
183 except IOError as inst:
183 if inst.errno != errno.ENOENT:
184 if inst.errno != errno.ENOENT:
184 raise
185 raise
185 if phasedefaults:
186 if phasedefaults:
186 for f in phasedefaults:
187 for f in phasedefaults:
187 roots = f(repo, roots)
188 roots = f(repo, roots)
188 dirty = True
189 dirty = True
189 return roots, dirty
190 return roots, dirty
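For reference, the phaseroots file parsed here is plain text with one root per line, '<phase-number> <40-hex-node>', matching what _write() emits further below. A self-contained sketch with a fabricated node:

    sample = b'1 5c095ad7e90f871700f02dd1fa5012cb4498a2d4\n'  # one draft root
    phase, nh = sample.split()
    assert int(phase) == 1 and len(nh) == 40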
190
191
191
192
192 def binaryencode(phasemapping):
193 def binaryencode(phasemapping):
193 """encode a 'phase -> nodes' mapping into a binary stream
194 """encode a 'phase -> nodes' mapping into a binary stream
194
195
195 The revision lists are encoded as (phase, root) pairs.
196 The revision lists are encoded as (phase, root) pairs.
196 """
197 """
197 binarydata = []
198 binarydata = []
198 for phase, nodes in pycompat.iteritems(phasemapping):
199 for phase, nodes in pycompat.iteritems(phasemapping):
199 for head in nodes:
200 for head in nodes:
200 binarydata.append(_fphasesentry.pack(phase, head))
201 binarydata.append(_fphasesentry.pack(phase, head))
201 return b''.join(binarydata)
202 return b''.join(binarydata)
202
203
203
204
204 def binarydecode(stream):
205 def binarydecode(stream):
205 """decode a binary stream into a 'phase -> nodes' mapping
206 """decode a binary stream into a 'phase -> nodes' mapping
206
207
207 The (phase, root) pairs are turned back into a dictionary with
208 The (phase, root) pairs are turned back into a dictionary with
208 the phase as index and the aggregated roots of that phase as value."""
209 the phase as index and the aggregated roots of that phase as value."""
209 headsbyphase = {i: [] for i in allphases}
210 headsbyphase = {i: [] for i in allphases}
210 entrysize = _fphasesentry.size
211 entrysize = _fphasesentry.size
211 while True:
212 while True:
212 entry = stream.read(entrysize)
213 entry = stream.read(entrysize)
213 if len(entry) < entrysize:
214 if len(entry) < entrysize:
214 if entry:
215 if entry:
215 raise error.Abort(_(b'bad phase-heads stream'))
216 raise error.Abort(_(b'bad phase-heads stream'))
216 break
217 break
217 phase, node = _fphasesentry.unpack(entry)
218 phase, node = _fphasesentry.unpack(entry)
218 headsbyphase[phase].append(node)
219 headsbyphase[phase].append(node)
219 return headsbyphase
220 return headsbyphase
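A self-contained round-trip sketch of this wire format: each entry is a big-endian 32-bit phase followed by a 20-byte node, the same '>i20s' layout as _fphasesentry (the node bytes are fabricated):

    import struct

    entry = struct.Struct('>i20s')
    node = b'\x11' * 20
    blob = entry.pack(1, node)           # one draft head
    phase, decoded = entry.unpack(blob)
    assert (phase, decoded) == (1, node)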
220
221
221
222
222 def _sortedrange_insert(data, idx, rev, t):
223 def _sortedrange_insert(data, idx, rev, t):
223 merge_before = False
224 merge_before = False
224 if idx:
225 if idx:
225 r1, t1 = data[idx - 1]
226 r1, t1 = data[idx - 1]
226 merge_before = r1[-1] + 1 == rev and t1 == t
227 merge_before = r1[-1] + 1 == rev and t1 == t
227 merge_after = False
228 merge_after = False
228 if idx < len(data):
229 if idx < len(data):
229 r2, t2 = data[idx]
230 r2, t2 = data[idx]
230 merge_after = r2[0] == rev + 1 and t2 == t
231 merge_after = r2[0] == rev + 1 and t2 == t
231
232
232 if merge_before and merge_after:
233 if merge_before and merge_after:
233 data[idx - 1] = (pycompat.xrange(r1[0], r2[-1] + 1), t)
234 data[idx - 1] = (pycompat.xrange(r1[0], r2[-1] + 1), t)
234 data.pop(idx)
235 data.pop(idx)
235 elif merge_before:
236 elif merge_before:
236 data[idx - 1] = (pycompat.xrange(r1[0], rev + 1), t)
237 data[idx - 1] = (pycompat.xrange(r1[0], rev + 1), t)
237 elif merge_after:
238 elif merge_after:
238 data[idx] = (pycompat.xrange(rev, r2[-1] + 1), t)
239 data[idx] = (pycompat.xrange(rev, r2[-1] + 1), t)
239 else:
240 else:
240 data.insert(idx, (pycompat.xrange(rev, rev + 1), t))
241 data.insert(idx, (pycompat.xrange(rev, rev + 1), t))
241
242
242
243
243 def _sortedrange_split(data, idx, rev, t):
244 def _sortedrange_split(data, idx, rev, t):
244 r1, t1 = data[idx]
245 r1, t1 = data[idx]
245 if t == t1:
246 if t == t1:
246 return
247 return
247 t = (t1[0], t[1])
248 t = (t1[0], t[1])
248 if len(r1) == 1:
249 if len(r1) == 1:
249 data.pop(idx)
250 data.pop(idx)
250 _sortedrange_insert(data, idx, rev, t)
251 _sortedrange_insert(data, idx, rev, t)
251 elif r1[0] == rev:
252 elif r1[0] == rev:
252 data[idx] = (pycompat.xrange(rev + 1, r1[-1] + 1), t1)
253 data[idx] = (pycompat.xrange(rev + 1, r1[-1] + 1), t1)
253 _sortedrange_insert(data, idx, rev, t)
254 _sortedrange_insert(data, idx, rev, t)
254 elif r1[-1] == rev:
255 elif r1[-1] == rev:
255 data[idx] = (pycompat.xrange(r1[0], rev), t1)
256 data[idx] = (pycompat.xrange(r1[0], rev), t1)
256 _sortedrange_insert(data, idx + 1, rev, t)
257 _sortedrange_insert(data, idx + 1, rev, t)
257 else:
258 else:
258 data[idx : idx + 1] = [
259 data[idx : idx + 1] = [
259 (pycompat.xrange(r1[0], rev), t1),
260 (pycompat.xrange(r1[0], rev), t1),
260 (pycompat.xrange(rev, rev + 1), t),
261 (pycompat.xrange(rev, rev + 1), t),
261 (pycompat.xrange(rev + 1, r1[-1] + 1), t1),
262 (pycompat.xrange(rev + 1, r1[-1] + 1), t1),
262 ]
263 ]
263
264
264
265
265 def _trackphasechange(data, rev, old, new):
266 def _trackphasechange(data, rev, old, new):
266 """add a phase move to the <data> list of ranges
267 """add a phase move to the <data> list of ranges
267
268
268 If data is None, nothing happens.
269 If data is None, nothing happens.
269 """
270 """
270 if data is None:
271 if data is None:
271 return
272 return
272
273
273 # If data is empty, create a one-revision range and done
274 # If data is empty, create a one-revision range and done
274 if not data:
275 if not data:
275 data.insert(0, (pycompat.xrange(rev, rev + 1), (old, new)))
276 data.insert(0, (pycompat.xrange(rev, rev + 1), (old, new)))
276 return
277 return
277
278
278 low = 0
279 low = 0
279 high = len(data)
280 high = len(data)
280 t = (old, new)
281 t = (old, new)
281 while low < high:
282 while low < high:
282 mid = (low + high) // 2
283 mid = (low + high) // 2
283 revs = data[mid][0]
284 revs = data[mid][0]
284
285
285 if rev in revs:
286 if rev in revs:
286 _sortedrange_split(data, mid, rev, t)
287 _sortedrange_split(data, mid, rev, t)
287 return
288 return
288
289
289 if revs[0] == rev + 1:
290 if revs[0] == rev + 1:
290 if mid and data[mid - 1][0][-1] == rev:
291 if mid and data[mid - 1][0][-1] == rev:
291 _sortedrange_split(data, mid - 1, rev, t)
292 _sortedrange_split(data, mid - 1, rev, t)
292 else:
293 else:
293 _sortedrange_insert(data, mid, rev, t)
294 _sortedrange_insert(data, mid, rev, t)
294 return
295 return
295
296
296 if revs[-1] == rev - 1:
297 if revs[-1] == rev - 1:
297 if mid + 1 < len(data) and data[mid + 1][0][0] == rev:
298 if mid + 1 < len(data) and data[mid + 1][0][0] == rev:
298 _sortedrange_split(data, mid + 1, rev, t)
299 _sortedrange_split(data, mid + 1, rev, t)
299 else:
300 else:
300 _sortedrange_insert(data, mid + 1, rev, t)
301 _sortedrange_insert(data, mid + 1, rev, t)
301 return
302 return
302
303
303 if revs[0] > rev:
304 if revs[0] > rev:
304 high = mid
305 high = mid
305 else:
306 else:
306 low = mid + 1
307 low = mid + 1
307
308
308 if low == len(data):
309 if low == len(data):
309 data.append((pycompat.xrange(rev, rev + 1), t))
310 data.append((pycompat.xrange(rev, rev + 1), t))
310 return
311 return
311
312
312 r1, t1 = data[low]
313 r1, t1 = data[low]
313 if r1[0] > rev:
314 if r1[0] > rev:
314 data.insert(low, (pycompat.xrange(rev, rev + 1), t))
315 data.insert(low, (pycompat.xrange(rev, rev + 1), t))
315 else:
316 else:
316 data.insert(low + 1, (pycompat.xrange(rev, rev + 1), t))
317 data.insert(low + 1, (pycompat.xrange(rev, rev + 1), t))
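To make the range bookkeeping concrete, a simplified standalone sketch (append-only and ascending revs; the real code above also handles out-of-order inserts and splits): contiguous moves carrying the same (old, new) pair collapse into a single range.

    def track(data, rev, old, new):
        t = (old, new)
        if data and data[-1][0][-1] + 1 == rev and data[-1][1] == t:
            start = data[-1][0][0]
            data[-1] = (range(start, rev + 1), t)  # extend the last range
        else:
            data.append((range(rev, rev + 1), t))  # start a new range

    moves = []
    for rev in (10, 11, 12, 20):
        track(moves, rev, 1, 0)                    # draft (1) -> public (0)
    assert moves == [(range(10, 13), (1, 0)), (range(20, 21), (1, 0))]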
317
318
318
319
319 class phasecache(object):
320 class phasecache(object):
320 def __init__(self, repo, phasedefaults, _load=True):
321 def __init__(self, repo, phasedefaults, _load=True):
321 if _load:
322 if _load:
322 # Cheap trick to allow shallow-copy without copy module
323 # Cheap trick to allow shallow-copy without copy module
323 self.phaseroots, self.dirty = _readroots(repo, phasedefaults)
324 self.phaseroots, self.dirty = _readroots(repo, phasedefaults)
324 self._loadedrevslen = 0
325 self._loadedrevslen = 0
325 self._phasesets = None
326 self._phasesets = None
326 self.filterunknown(repo)
327 self.filterunknown(repo)
327 self.opener = repo.svfs
328 self.opener = repo.svfs
328
329
329 def hasnonpublicphases(self, repo):
330 def hasnonpublicphases(self, repo):
330 """detect if there are revisions with non-public phase"""
331 """detect if there are revisions with non-public phase"""
331 repo = repo.unfiltered()
332 repo = repo.unfiltered()
332 cl = repo.changelog
333 cl = repo.changelog
333 if len(cl) >= self._loadedrevslen:
334 if len(cl) >= self._loadedrevslen:
334 self.invalidate()
335 self.invalidate()
335 self.loadphaserevs(repo)
336 self.loadphaserevs(repo)
336 return any(
337 return any(
337 revs
338 revs
338 for phase, revs in pycompat.iteritems(self.phaseroots)
339 for phase, revs in pycompat.iteritems(self.phaseroots)
339 if phase != public
340 if phase != public
340 )
341 )
341
342
342 def nonpublicphaseroots(self, repo):
343 def nonpublicphaseroots(self, repo):
343 """returns the roots of all non-public phases
344 """returns the roots of all non-public phases
344
345
345 The roots are not minimized, so if the secret revisions are
346 The roots are not minimized, so if the secret revisions are
346 descendants of draft revisions, their roots will still be present.
347 descendants of draft revisions, their roots will still be present.
347 """
348 """
348 repo = repo.unfiltered()
349 repo = repo.unfiltered()
349 cl = repo.changelog
350 cl = repo.changelog
350 if len(cl) >= self._loadedrevslen:
351 if len(cl) >= self._loadedrevslen:
351 self.invalidate()
352 self.invalidate()
352 self.loadphaserevs(repo)
353 self.loadphaserevs(repo)
353 return set().union(
354 return set().union(
354 *[
355 *[
355 revs
356 revs
356 for phase, revs in pycompat.iteritems(self.phaseroots)
357 for phase, revs in pycompat.iteritems(self.phaseroots)
357 if phase != public
358 if phase != public
358 ]
359 ]
359 )
360 )
360
361
361 def getrevset(self, repo, phases, subset=None):
362 def getrevset(self, repo, phases, subset=None):
362 """return a smartset for the given phases"""
363 """return a smartset for the given phases"""
363 self.loadphaserevs(repo) # ensure phase's sets are loaded
364 self.loadphaserevs(repo) # ensure phase's sets are loaded
364 phases = set(phases)
365 phases = set(phases)
365 publicphase = public in phases
366 publicphase = public in phases
366
367
367 if publicphase:
368 if publicphase:
368 # In this case, phases keeps all the *other* phases.
369 # In this case, phases keeps all the *other* phases.
369 phases = set(allphases).difference(phases)
370 phases = set(allphases).difference(phases)
370 if not phases:
371 if not phases:
371 return smartset.fullreposet(repo)
372 return smartset.fullreposet(repo)
372
373
373 # fast path: _phasesets contains the interesting sets,
374 # fast path: _phasesets contains the interesting sets,
374 # might only need a union and post-filtering.
375 # might only need a union and post-filtering.
375 revsneedscopy = False
376 revsneedscopy = False
376 if len(phases) == 1:
377 if len(phases) == 1:
377 [p] = phases
378 [p] = phases
378 revs = self._phasesets[p]
379 revs = self._phasesets[p]
379 revsneedscopy = True # Don't modify _phasesets
380 revsneedscopy = True # Don't modify _phasesets
380 else:
381 else:
381 # revs has the revisions in all *other* phases.
382 # revs has the revisions in all *other* phases.
382 revs = set.union(*[self._phasesets[p] for p in phases])
383 revs = set.union(*[self._phasesets[p] for p in phases])
383
384
384 def _addwdir(wdirsubset, wdirrevs):
385 def _addwdir(wdirsubset, wdirrevs):
385 if wdirrev in wdirsubset and repo[None].phase() in phases:
386 if wdirrev in wdirsubset and repo[None].phase() in phases:
386 if revsneedscopy:
387 if revsneedscopy:
387 wdirrevs = wdirrevs.copy()
388 wdirrevs = wdirrevs.copy()
388 # The working dir would never be in the cache, but it was in
389 # The working dir would never be in the cache, but it was in
389 # the subset being filtered for its phase (or filtered out,
390 # the subset being filtered for its phase (or filtered out,
390 # depending on publicphase), so add it to the output to be
391 # depending on publicphase), so add it to the output to be
391 # included (or filtered out).
392 # included (or filtered out).
392 wdirrevs.add(wdirrev)
393 wdirrevs.add(wdirrev)
393 return wdirrevs
394 return wdirrevs
394
395
395 if not publicphase:
396 if not publicphase:
396 if repo.changelog.filteredrevs:
397 if repo.changelog.filteredrevs:
397 revs = revs - repo.changelog.filteredrevs
398 revs = revs - repo.changelog.filteredrevs
398
399
399 if subset is None:
400 if subset is None:
400 return smartset.baseset(revs)
401 return smartset.baseset(revs)
401 else:
402 else:
402 revs = _addwdir(subset, revs)
403 revs = _addwdir(subset, revs)
403 return subset & smartset.baseset(revs)
404 return subset & smartset.baseset(revs)
404 else:
405 else:
405 if subset is None:
406 if subset is None:
406 subset = smartset.fullreposet(repo)
407 subset = smartset.fullreposet(repo)
407
408
408 revs = _addwdir(subset, revs)
409 revs = _addwdir(subset, revs)
409
410
410 if not revs:
411 if not revs:
411 return subset
412 return subset
412 return subset.filter(lambda r: r not in revs)
413 return subset.filter(lambda r: r not in revs)
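The complement trick above is worth a tiny standalone illustration (pure Python, illustrative data): public revisions are never stored explicitly, so asking for them means subtracting the union of every *other* phase set.

    allphases = (0, 1, 2, 32, 96)  # public, draft, secret, archived, internal
    phasesets = {1: {5, 6}, 2: {7}, 32: set(), 96: set()}

    def revs_for(phases, allrevs):
        phases = set(phases)
        if 0 in phases:            # public requested: work on the complement
            others = set(allphases) - phases
            excluded = set().union(*(phasesets[p] for p in others)) if others else set()
            return set(allrevs) - excluded
        return set().union(*(phasesets[p] for p in phases))

    assert revs_for({0}, range(10)) == {0, 1, 2, 3, 4, 8, 9}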
413
414
414 def copy(self):
415 def copy(self):
415 # Shallow copy meant to ensure isolation in
416 # Shallow copy meant to ensure isolation in
416 # advance/retractboundary(), nothing more.
417 # advance/retractboundary(), nothing more.
417 ph = self.__class__(None, None, _load=False)
418 ph = self.__class__(None, None, _load=False)
418 ph.phaseroots = self.phaseroots.copy()
419 ph.phaseroots = self.phaseroots.copy()
419 ph.dirty = self.dirty
420 ph.dirty = self.dirty
420 ph.opener = self.opener
421 ph.opener = self.opener
421 ph._loadedrevslen = self._loadedrevslen
422 ph._loadedrevslen = self._loadedrevslen
422 ph._phasesets = self._phasesets
423 ph._phasesets = self._phasesets
423 return ph
424 return ph
424
425
425 def replace(self, phcache):
426 def replace(self, phcache):
426 """replace all values in 'self' with content of phcache"""
427 """replace all values in 'self' with content of phcache"""
427 for a in (
428 for a in (
428 b'phaseroots',
429 b'phaseroots',
429 b'dirty',
430 b'dirty',
430 b'opener',
431 b'opener',
431 b'_loadedrevslen',
432 b'_loadedrevslen',
432 b'_phasesets',
433 b'_phasesets',
433 ):
434 ):
434 setattr(self, a, getattr(phcache, a))
435 setattr(self, a, getattr(phcache, a))
435
436
436 def _getphaserevsnative(self, repo):
437 def _getphaserevsnative(self, repo):
437 repo = repo.unfiltered()
438 repo = repo.unfiltered()
438 return repo.changelog.computephases(self.phaseroots)
439 return repo.changelog.computephases(self.phaseroots)
439
440
440 def _computephaserevspure(self, repo):
441 def _computephaserevspure(self, repo):
441 repo = repo.unfiltered()
442 repo = repo.unfiltered()
442 cl = repo.changelog
443 cl = repo.changelog
443 self._phasesets = {phase: set() for phase in allphases}
444 self._phasesets = {phase: set() for phase in allphases}
444 lowerroots = set()
445 lowerroots = set()
445 for phase in reversed(trackedphases):
446 for phase in reversed(trackedphases):
446 roots = pycompat.maplist(cl.rev, self.phaseroots[phase])
447 roots = pycompat.maplist(cl.rev, self.phaseroots[phase])
447 if roots:
448 if roots:
448 ps = set(cl.descendants(roots))
449 ps = set(cl.descendants(roots))
449 for root in roots:
450 for root in roots:
450 ps.add(root)
451 ps.add(root)
451 ps.difference_update(lowerroots)
452 ps.difference_update(lowerroots)
452 lowerroots.update(ps)
453 lowerroots.update(ps)
453 self._phasesets[phase] = ps
454 self._phasesets[phase] = ps
454 self._loadedrevslen = len(cl)
455 self._loadedrevslen = len(cl)
455
456
456 def loadphaserevs(self, repo):
457 def loadphaserevs(self, repo):
457 """ensure phase information is loaded in the object"""
458 """ensure phase information is loaded in the object"""
458 if self._phasesets is None:
459 if self._phasesets is None:
459 try:
460 try:
460 res = self._getphaserevsnative(repo)
461 res = self._getphaserevsnative(repo)
461 self._loadedrevslen, self._phasesets = res
462 self._loadedrevslen, self._phasesets = res
462 except AttributeError:
463 except AttributeError:
463 self._computephaserevspure(repo)
464 self._computephaserevspure(repo)
464
465
465 def invalidate(self):
466 def invalidate(self):
466 self._loadedrevslen = 0
467 self._loadedrevslen = 0
467 self._phasesets = None
468 self._phasesets = None
468
469
469 def phase(self, repo, rev):
470 def phase(self, repo, rev):
470 # We need a repo argument here to be able to build _phasesets
471 # We need a repo argument here to be able to build _phasesets
471 # if necessary. The repository instance is not stored in
472 # if necessary. The repository instance is not stored in
472 # phasecache to avoid reference cycles. The changelog instance
473 # phasecache to avoid reference cycles. The changelog instance
473 # is not stored because it is a filecache() property and can
474 # is not stored because it is a filecache() property and can
474 # be replaced without us being notified.
475 # be replaced without us being notified.
475 if rev == nullrev:
476 if rev == nullrev:
476 return public
477 return public
477 if rev < nullrev:
478 if rev < nullrev:
478 raise ValueError(_(b'cannot lookup negative revision'))
479 raise ValueError(_(b'cannot lookup negative revision'))
479 if rev >= self._loadedrevslen:
480 if rev >= self._loadedrevslen:
480 self.invalidate()
481 self.invalidate()
481 self.loadphaserevs(repo)
482 self.loadphaserevs(repo)
482 for phase in trackedphases:
483 for phase in trackedphases:
483 if rev in self._phasesets[phase]:
484 if rev in self._phasesets[phase]:
484 return phase
485 return phase
485 return public
486 return public
486
487
487 def write(self):
488 def write(self):
488 if not self.dirty:
489 if not self.dirty:
489 return
490 return
490 f = self.opener(b'phaseroots', b'w', atomictemp=True, checkambig=True)
491 f = self.opener(b'phaseroots', b'w', atomictemp=True, checkambig=True)
491 try:
492 try:
492 self._write(f)
493 self._write(f)
493 finally:
494 finally:
494 f.close()
495 f.close()
495
496
496 def _write(self, fp):
497 def _write(self, fp):
497 for phase, roots in pycompat.iteritems(self.phaseroots):
498 for phase, roots in pycompat.iteritems(self.phaseroots):
498 for h in sorted(roots):
499 for h in sorted(roots):
499 fp.write(b'%i %s\n' % (phase, hex(h)))
500 fp.write(b'%i %s\n' % (phase, hex(h)))
500 self.dirty = False
501 self.dirty = False
501
502
502 def _updateroots(self, phase, newroots, tr):
503 def _updateroots(self, phase, newroots, tr):
503 self.phaseroots[phase] = newroots
504 self.phaseroots[phase] = newroots
504 self.invalidate()
505 self.invalidate()
505 self.dirty = True
506 self.dirty = True
506
507
507 tr.addfilegenerator(b'phase', (b'phaseroots',), self._write)
508 tr.addfilegenerator(b'phase', (b'phaseroots',), self._write)
508 tr.hookargs[b'phases_moved'] = b'1'
509 tr.hookargs[b'phases_moved'] = b'1'
509
510
510 def registernew(self, repo, tr, targetphase, nodes):
511 def registernew(self, repo, tr, targetphase, nodes):
511 repo = repo.unfiltered()
512 repo = repo.unfiltered()
512 self._retractboundary(repo, tr, targetphase, nodes)
513 self._retractboundary(repo, tr, targetphase, nodes)
513 if tr is not None and b'phases' in tr.changes:
514 if tr is not None and b'phases' in tr.changes:
514 phasetracking = tr.changes[b'phases']
515 phasetracking = tr.changes[b'phases']
515 torev = repo.changelog.rev
516 torev = repo.changelog.rev
516 phase = self.phase
517 phase = self.phase
517 revs = [torev(node) for node in nodes]
518 revs = [torev(node) for node in nodes]
518 revs.sort()
519 revs.sort()
519 for rev in revs:
520 for rev in revs:
520 revphase = phase(repo, rev)
521 revphase = phase(repo, rev)
521 _trackphasechange(phasetracking, rev, None, revphase)
522 _trackphasechange(phasetracking, rev, None, revphase)
522 repo.invalidatevolatilesets()
523 repo.invalidatevolatilesets()
523
524
524 def advanceboundary(self, repo, tr, targetphase, nodes, dryrun=None):
525 def advanceboundary(self, repo, tr, targetphase, nodes, dryrun=None):
525 """Set all 'nodes' to phase 'targetphase'
526 """Set all 'nodes' to phase 'targetphase'
526
527
527 Nodes with a phase lower than 'targetphase' are not affected.
528 Nodes with a phase lower than 'targetphase' are not affected.
528
529
529 If dryrun is True, no actions will be performed
530 If dryrun is True, no actions will be performed
530
531
531 Returns a set of revs whose phase is changed or should be changed
532 Returns a set of revs whose phase is changed or should be changed
532 """
533 """
533 # Be careful to preserve shallow-copied values: do not update
534 # Be careful to preserve shallow-copied values: do not update
534 # phaseroots values, replace them.
535 # phaseroots values, replace them.
535 if tr is None:
536 if tr is None:
536 phasetracking = None
537 phasetracking = None
537 else:
538 else:
538 phasetracking = tr.changes.get(b'phases')
539 phasetracking = tr.changes.get(b'phases')
539
540
540 repo = repo.unfiltered()
541 repo = repo.unfiltered()
541
542
542 changes = set() # set of revisions to be changed
543 changes = set() # set of revisions to be changed
543 delroots = [] # set of root deleted by this path
544 delroots = [] # set of root deleted by this path
544 for phase in (phase for phase in allphases if phase > targetphase):
545 for phase in (phase for phase in allphases if phase > targetphase):
545 # filter nodes that are not in a compatible phase already
546 # filter nodes that are not in a compatible phase already
546 nodes = [
547 nodes = [
547 n for n in nodes if self.phase(repo, repo[n].rev()) >= phase
548 n for n in nodes if self.phase(repo, repo[n].rev()) >= phase
548 ]
549 ]
549 if not nodes:
550 if not nodes:
550 break # no roots to move anymore
551 break # no roots to move anymore
551
552
552 olds = self.phaseroots[phase]
553 olds = self.phaseroots[phase]
553
554
554 affected = repo.revs(b'%ln::%ln', olds, nodes)
555 affected = repo.revs(b'%ln::%ln', olds, nodes)
555 changes.update(affected)
556 changes.update(affected)
556 if dryrun:
557 if dryrun:
557 continue
558 continue
558 for r in affected:
559 for r in affected:
559 _trackphasechange(
560 _trackphasechange(
560 phasetracking, r, self.phase(repo, r), targetphase
561 phasetracking, r, self.phase(repo, r), targetphase
561 )
562 )
562
563
563 roots = {
564 roots = {
564 ctx.node()
565 ctx.node()
565 for ctx in repo.set(b'roots((%ln::) - %ld)', olds, affected)
566 for ctx in repo.set(b'roots((%ln::) - %ld)', olds, affected)
566 }
567 }
567 if olds != roots:
568 if olds != roots:
568 self._updateroots(phase, roots, tr)
569 self._updateroots(phase, roots, tr)
569 # some roots may need to be declared for lower phases
570 # some roots may need to be declared for lower phases
570 delroots.extend(olds - roots)
571 delroots.extend(olds - roots)
571 if not dryrun:
572 if not dryrun:
572 # declare deleted root in the target phase
573 # declare deleted root in the target phase
573 if targetphase != 0:
574 if targetphase != 0:
574 self._retractboundary(repo, tr, targetphase, delroots)
575 self._retractboundary(repo, tr, targetphase, delroots)
575 repo.invalidatevolatilesets()
576 repo.invalidatevolatilesets()
576 return changes
577 return changes
577
578
578 def retractboundary(self, repo, tr, targetphase, nodes):
579 def retractboundary(self, repo, tr, targetphase, nodes):
579 oldroots = {
580 oldroots = {
580 phase: revs
581 phase: revs
581 for phase, revs in pycompat.iteritems(self.phaseroots)
582 for phase, revs in pycompat.iteritems(self.phaseroots)
582 if phase <= targetphase
583 if phase <= targetphase
583 }
584 }
584 if tr is None:
585 if tr is None:
585 phasetracking = None
586 phasetracking = None
586 else:
587 else:
587 phasetracking = tr.changes.get(b'phases')
588 phasetracking = tr.changes.get(b'phases')
588 repo = repo.unfiltered()
589 repo = repo.unfiltered()
589 if (
590 if (
590 self._retractboundary(repo, tr, targetphase, nodes)
591 self._retractboundary(repo, tr, targetphase, nodes)
591 and phasetracking is not None
592 and phasetracking is not None
592 ):
593 ):
593
594
594 # find the affected revisions
595 # find the affected revisions
595 new = self.phaseroots[targetphase]
596 new = self.phaseroots[targetphase]
596 old = oldroots[targetphase]
597 old = oldroots[targetphase]
597 affected = set(repo.revs(b'(%ln::) - (%ln::)', new, old))
598 affected = set(repo.revs(b'(%ln::) - (%ln::)', new, old))
598
599
599 # find the phase of the affected revision
600 # find the phase of the affected revision
600 for phase in pycompat.xrange(targetphase, -1, -1):
601 for phase in pycompat.xrange(targetphase, -1, -1):
601 if phase:
602 if phase:
602 roots = oldroots.get(phase, [])
603 roots = oldroots.get(phase, [])
603 revs = set(repo.revs(b'%ln::%ld', roots, affected))
604 revs = set(repo.revs(b'%ln::%ld', roots, affected))
604 affected -= revs
605 affected -= revs
605 else: # public phase
606 else: # public phase
606 revs = affected
607 revs = affected
607 for r in sorted(revs):
608 for r in sorted(revs):
608 _trackphasechange(phasetracking, r, phase, targetphase)
609 _trackphasechange(phasetracking, r, phase, targetphase)
609 repo.invalidatevolatilesets()
610 repo.invalidatevolatilesets()
610
611
611 def _retractboundary(self, repo, tr, targetphase, nodes):
612 def _retractboundary(self, repo, tr, targetphase, nodes):
612 # Be careful to preserve shallow-copied values: do not update
613 # Be careful to preserve shallow-copied values: do not update
613 # phaseroots values, replace them.
614 # phaseroots values, replace them.
614 if targetphase in (archived, internal) and not supportinternal(repo):
615 if targetphase in (archived, internal) and not supportinternal(repo):
615 name = phasenames[targetphase]
616 name = phasenames[targetphase]
616 msg = b'this repository does not support the %s phase' % name
617 msg = b'this repository does not support the %s phase' % name
617 raise error.ProgrammingError(msg)
618 raise error.ProgrammingError(msg)
618
619
619 repo = repo.unfiltered()
620 repo = repo.unfiltered()
620 torev = repo.changelog.rev
621 torev = repo.changelog.rev
621 tonode = repo.changelog.node
622 tonode = repo.changelog.node
622 currentroots = {torev(node) for node in self.phaseroots[targetphase]}
623 currentroots = {torev(node) for node in self.phaseroots[targetphase]}
623 finalroots = oldroots = set(currentroots)
624 finalroots = oldroots = set(currentroots)
624 newroots = [torev(node) for node in nodes]
625 newroots = [torev(node) for node in nodes]
625 newroots = [
626 newroots = [
626 rev for rev in newroots if self.phase(repo, rev) < targetphase
627 rev for rev in newroots if self.phase(repo, rev) < targetphase
627 ]
628 ]
628
629
629 if newroots:
630 if newroots:
630 if nullrev in newroots:
631 if nullrev in newroots:
631 raise error.Abort(_(b'cannot change null revision phase'))
632 raise error.Abort(_(b'cannot change null revision phase'))
632 currentroots.update(newroots)
633 currentroots.update(newroots)
633
634
634 # Only compute new roots for revs above the roots that are being
635 # Only compute new roots for revs above the roots that are being
635 # retracted.
636 # retracted.
636 minnewroot = min(newroots)
637 minnewroot = min(newroots)
637 aboveroots = [rev for rev in currentroots if rev >= minnewroot]
638 aboveroots = [rev for rev in currentroots if rev >= minnewroot]
638 updatedroots = repo.revs(b'roots(%ld::)', aboveroots)
639 updatedroots = repo.revs(b'roots(%ld::)', aboveroots)
639
640
640 finalroots = {rev for rev in currentroots if rev < minnewroot}
641 finalroots = {rev for rev in currentroots if rev < minnewroot}
641 finalroots.update(updatedroots)
642 finalroots.update(updatedroots)
642 if finalroots != oldroots:
643 if finalroots != oldroots:
643 self._updateroots(
644 self._updateroots(
644 targetphase, {tonode(rev) for rev in finalroots}, tr
645 targetphase, {tonode(rev) for rev in finalroots}, tr
645 )
646 )
646 return True
647 return True
647 return False
648 return False
648
649
    def filterunknown(self, repo):
        """remove unknown nodes from the phase boundary

        Nothing is lost as unknown nodes only hold data for their descendants.
        """
        filtered = False
        has_node = repo.changelog.index.has_node  # to filter unknown nodes
        for phase, nodes in pycompat.iteritems(self.phaseroots):
            missing = sorted(node for node in nodes if not has_node(node))
            if missing:
                for mnode in missing:
                    repo.ui.debug(
                        b'removing unknown node %s from %i-phase boundary\n'
                        % (short(mnode), phase)
                    )
                nodes.symmetric_difference_update(missing)
                filtered = True
        if filtered:
            self.dirty = True
        # filterunknown is called by repo.destroyed; we may have no changes
        # in the roots, but the _phasesets contents are certainly invalid
        # (or at least we have no proper way to check that). Related to
        # issue 3858.
        #
        # The other caller is __init__, which has no _phasesets initialized
        # anyway. If this changes, we should consider adding a dedicated
        # "destroyed" function to phasecache or a proper cache key mechanism
        # (see the branchmap one).
        self.invalidate()


def advanceboundary(repo, tr, targetphase, nodes, dryrun=None):
    """Add nodes to a phase, changing other nodes' phases if necessary.

    This function moves the boundary *forward*: all nodes are set to
    the target phase or kept in a *lower* phase.

    The boundary is simplified to contain phase roots only.

    If dryrun is True, no actions will be performed.

    Returns a set of revs whose phase is changed or should be changed.
    """
    phcache = repo._phasecache.copy()
    changes = phcache.advanceboundary(
        repo, tr, targetphase, nodes, dryrun=dryrun
    )
    if not dryrun:
        repo._phasecache.replace(phcache)
    return changes


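# A hedged usage sketch (illustration only, not part of the module): making
# the current tip public inside a transaction. The lock/transaction handling
# shown here is an assumption for the example, not a prescribed call site.
#
#     with repo.lock(), repo.transaction(b'phase') as tr:
#         advanceboundary(repo, tr, public, [repo[b'tip'].node()])
#
# The boundary only moves forward here: nodes already public stay public,
# and nothing is retracted to a higher (more private) phase.
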
def retractboundary(repo, tr, targetphase, nodes):
    """Set nodes back to a phase, changing other nodes' phases if
    necessary.

    This function moves the boundary *backward*: all nodes are set to
    the target phase or kept in a *higher* phase.

    The boundary is simplified to contain phase roots only."""
    phcache = repo._phasecache.copy()
    phcache.retractboundary(repo, tr, targetphase, nodes)
    repo._phasecache.replace(phcache)


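# A hedged counterpart sketch to the example above (the call site is an
# assumption for illustration): forcing a changeset back to secret, e.g.
# to hide it from exchange.
#
#     with repo.lock(), repo.transaction(b'phase') as tr:
#         retractboundary(repo, tr, secret, [repo[b'tip'].node()])
#
# Unlike advanceboundary, this only moves nodes to a higher phase; nodes
# already secret are left untouched.
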
def registernew(repo, tr, targetphase, nodes):
    """register a new revision and its phase

    Code adding revisions to the repository should use this function to
    set new changesets in their target phase (or higher).
    """
    phcache = repo._phasecache.copy()
    phcache.registernew(repo, tr, targetphase, nodes)
    repo._phasecache.replace(phcache)


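# A hedged sketch of the commit-time flow (simplified; real commit code
# paths carry more state, and `newnode` is an invented name standing in for
# a freshly added changelog node):
#
#     with repo.lock(), repo.transaction(b'commit') as tr:
#         registernew(repo, tr, newcommitphase(repo.ui), [newnode])
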
def listphases(repo):
    """List phase roots for serialization over pushkey"""
    # Use an ordered dictionary so behavior is deterministic.
    keys = util.sortdict()
    value = b'%i' % draft
    cl = repo.unfiltered().changelog
    for root in repo._phasecache.phaseroots[draft]:
        if repo._phasecache.phase(repo, cl.rev(root)) <= draft:
            keys[hex(root)] = value

    if repo.publishing():
        # Add an extra entry to let the remote know we are a publishing
        # repo. A publishing repo can't just pretend to be an old repo.
        # When pushing to a publishing repo, the client still needs to
        # push the phase boundary.
        #
        # A push does not only push changesets; it also pushes phase data.
        # New phase data may apply to common changesets which won't be
        # pushed (as they are common). Here is a very simple example:
        #
        # 1) repo A pushes changeset X as draft to repo B
        # 2) repo B makes changeset X public
        # 3) repo B pushes to repo A. X is not pushed, but the data that
        #    X is now public should be.
        #
        # The server can't handle it on its own as it has no idea of
        # the client's phase data.
        keys[b'publishing'] = b'True'
    return keys


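# A hedged sketch of the mapping this returns (the hex digest is invented
# and shortened for the example): one entry per draft root, plus the
# publishing marker when the repo publishes.
#
#     {
#         b'6d818bbc9c4f...': b'1',  # a draft root; phase number as bytes
#         b'publishing': b'True',    # only present on publishing repos
#     }
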
def pushphase(repo, nhex, oldphasestr, newphasestr):
    """Update the phase of a node as requested over pushkey"""
    repo = repo.unfiltered()
    with repo.lock():
        currentphase = repo[nhex].phase()
        newphase = abs(int(newphasestr))  # let's avoid negative index surprise
        oldphase = abs(int(oldphasestr))  # let's avoid negative index surprise
        if currentphase == oldphase and newphase < oldphase:
            with repo.transaction(b'pushkey-phase') as tr:
                advanceboundary(repo, tr, newphase, [bin(nhex)])
            return True
        elif currentphase == newphase:
            # raced, but got correct result
            return True
        else:
            return False


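# A hedged sketch of the compare-and-advance semantics (`nhex` is an
# assumption standing in for a real hex node id): the update only applies
# when the caller's view of the old phase matches, which keeps concurrent
# pushes safe.
#
#     ok = pushphase(repo, nhex, b'1', b'0')  # draft -> public
#     # ok is True if the move happened, or if a racing writer already
#     # produced the requested phase; False if the phases no longer line up.
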
def subsetphaseheads(repo, subset):
    """Finds the phase heads for a subset of a history

    Returns a dictionary keyed by phase number where each entry is a list
    of phase head nodes.
    """
    cl = repo.changelog

    headsbyphase = {i: [] for i in allphases}
    # No need to keep track of secret phase; any heads in the subset that
    # are not mentioned are implicitly secret.
    for phase in allphases[:secret]:
        revset = b"heads(%%ln & %s())" % phasenames[phase]
        headsbyphase[phase] = [cl.node(r) for r in repo.revs(revset, subset)]
    return headsbyphase


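# A hedged sketch of the returned shape (node values invented; simplified to
# the classic three phases, though the dict carries a key for every known
# phase): secret heads are left empty because anything unmentioned is
# implicitly secret on the receiving side.
#
#     {
#         public: [b'\x12\x34...'],  # heads of the public part of subset
#         draft: [b'\x56\x78...'],   # heads of the draft part
#         secret: [],                # intentionally not computed
#     }
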
def updatephases(repo, trgetter, headsbyphase):
    """Updates the repo with the given phase heads"""
    # Now advance phase boundaries of all phases
    #
    # Run the update (and fetch the transaction) only if there are actually
    # things to update. This avoids creating empty transactions during no-op
    # operations.

    for phase in allphases:
        revset = b'%ln - _phase(%s)'
        heads = [c.node() for c in repo.set(revset, headsbyphase[phase], phase)]
        if heads:
            advanceboundary(repo, trgetter(), phase, heads)


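# A hedged note on the trgetter parameter: callers pass a callable that
# lazily opens (and caches) the transaction, so nothing is created when
# every phase is already up to date. A sketch, where `gettransaction` is an
# assumption standing in for a caller-managed lazy transaction:
#
#     headsbyphase = subsetphaseheads(repo, subset)
#     updatephases(repo, gettransaction, headsbyphase)
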
def analyzeremotephases(repo, subset, roots):
    """Compute phase heads and roots in a subset of nodes from a root dict

    * subset is the heads of the subset
    * roots is a {<nodeid> => phase} mapping; keys and values are strings.

    Unknown elements in the input are accepted.
    """
    repo = repo.unfiltered()
    # build list from dictionary
    draftroots = []
    has_node = repo.changelog.index.has_node  # to filter unknown nodes
    for nhex, phase in pycompat.iteritems(roots):
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        phase = int(phase)
        if phase == public:
            if node != nullid:
                repo.ui.warn(
                    _(
                        b'ignoring inconsistent public root'
                        b' from remote: %s\n'
                    )
                    % nhex
                )
        elif phase == draft:
            if has_node(node):
                draftroots.append(node)
        else:
            repo.ui.warn(
                _(b'ignoring unexpected root from remote: %i %s\n')
                % (phase, nhex)
            )
    # compute heads
    publicheads = newheads(repo, subset, draftroots)
    return publicheads, draftroots


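# A hedged sketch tying this to listphases above (the dict shape is the
# pushkey phases namespace; the hex value is invented): draft roots are
# validated against the local changelog, and everything in `subset` that is
# not a descendant of a draft root is considered public.
#
#     roots = {b'6d818bbc9c4f...': b'1', b'publishing': b'True'}
#     publicheads, draftroots = analyzeremotephases(repo, remoteheads, roots)
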
class remotephasessummary(object):
    """summarize phase information on the remote side

    :publishing: True if the remote is publishing
    :publicheads: list of remote public phase heads (nodes)
    :draftheads: list of remote draft phase heads (nodes)
    :draftroots: list of remote draft phase roots (nodes)
    """

    def __init__(self, repo, remotesubset, remoteroots):
        unfi = repo.unfiltered()
        self._allremoteroots = remoteroots

        self.publishing = remoteroots.get(b'publishing', False)

        ana = analyzeremotephases(repo, remotesubset, remoteroots)
        self.publicheads, self.draftroots = ana
        # Get the list of all "heads" revs draft on remote
        dheads = unfi.set(b'heads(%ln::%ln)', self.draftroots, remotesubset)
        self.draftheads = [c.node() for c in dheads]


def newheads(repo, heads, roots):
    """compute the new heads of a subset minus another

    * `heads`: defines the first subset
    * `roots`: defines the second one, which we subtract from the first"""
    # prevent an import cycle
    # phases > dagop > patch > copies > scmutil > obsolete > obsutil > phases
    from . import dagop

    repo = repo.unfiltered()
    cl = repo.changelog
    rev = cl.index.get_rev
    if not roots:
        return heads
    if not heads or heads == [nullid]:
        return []
    # The logic operates on revisions; convert arguments early for convenience
    new_heads = {rev(n) for n in heads if n != nullid}
    roots = [rev(n) for n in roots]
    # compute the area we need to remove
    affected_zone = repo.revs(b"(%ld::%ld)", roots, new_heads)
    # heads in the area are no longer heads
    new_heads.difference_update(affected_zone)
    # revisions outside the area that have children inside it
    # might be new heads
    candidates = repo.revs(
        b"parents(%ld + (%ld and merge())) and not null", roots, affected_zone
    )
    candidates -= affected_zone
    if new_heads or candidates:
        # remove candidates that are ancestors of other heads
        new_heads.update(candidates)
        prunestart = repo.revs(b"parents(%ld) and not null", new_heads)
        pruned = dagop.reachableroots(repo, candidates, prunestart)
        new_heads.difference_update(pruned)

    return pycompat.maplist(cl.node, sorted(new_heads))


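# A hedged worked example (revision numbers invented): with a linear history
# 0..4, heads == [node(4)] and roots == [node(3)], the affected zone is
# 3::4 == {3, 4}, so rev 4 stops being a head and parents(3) == {2} becomes
# the sole candidate, which nothing prunes:
#
#     newheads(repo, [cl.node(4)], [cl.node(3)])  # -> [cl.node(2)]
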
def newcommitphase(ui):
    """helper to get the target phase of a new commit

    Handles all possible values of the phases.new-commit option.
    """
    v = ui.config(b'phases', b'new-commit')
    try:
        return phasenumber2[v]
    except KeyError:
        raise error.ConfigError(
            _(b"phases.new-commit: not a valid phase name ('%s')") % v
        )


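# A hedged configuration sketch: with the following in an hgrc,
# newcommitphase returns the number for 'secret', so new commits start out
# hidden from push and pull:
#
#     [phases]
#     new-commit = secret
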
def hassecret(repo):
    """utility function that checks if a repo has any secret changesets."""
    return bool(repo._phasecache.phaseroots[secret])


def preparehookargs(node, old, new):
    if old is None:
        old = b''
    else:
        old = phasenames[old]
    return {b'node': node, b'oldphase': old, b'phase': phasenames[new]}