nodemap: also warm manifest nodemap with other caches...
marmoute - r45291:97ebdb19 default
@@ -1,1972 +1,1975
# repository.py - Interfaces and base classes for repositories and peers.
#
# Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from ..i18n import _
from .. import error
from . import util as interfaceutil

# When narrowing is finalized and no longer subject to format changes,
# we should move this to just "narrow" or similar.
NARROW_REQUIREMENT = b'narrowhg-experimental'

# Local repository feature string.

# Revlogs are being used for file storage.
REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
# The storage part of the repository is shared from an external source.
REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
# LFS supported for backing file storage.
REPO_FEATURE_LFS = b'lfs'
# Repository supports being stream cloned.
REPO_FEATURE_STREAM_CLONE = b'streamclone'
# Files storage may lack data for all ancestors.
REPO_FEATURE_SHALLOW_FILE_STORAGE = b'shallowfilestorage'

REVISION_FLAG_CENSORED = 1 << 15
REVISION_FLAG_ELLIPSIS = 1 << 14
REVISION_FLAG_EXTSTORED = 1 << 13
REVISION_FLAG_SIDEDATA = 1 << 12

REVISION_FLAGS_KNOWN = (
    REVISION_FLAG_CENSORED
    | REVISION_FLAG_ELLIPSIS
    | REVISION_FLAG_EXTSTORED
    | REVISION_FLAG_SIDEDATA
)

CG_DELTAMODE_STD = b'default'
CG_DELTAMODE_PREV = b'previous'
CG_DELTAMODE_FULL = b'fulltext'
CG_DELTAMODE_P1 = b'p1'


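# Illustrative sketch (not part of the interface definitions below): a
# hypothetical consumer rejecting revisions whose flags it does not
# understand, using the constants above.
def _example_check_flags(flags):
    unknown = flags & ~REVISION_FLAGS_KNOWN
    if unknown:
        raise error.Abort(b'unsupported revision flags: 0x%x' % unknown)
    return bool(flags & REVISION_FLAG_CENSORED)

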
class ipeerconnection(interfaceutil.Interface):
    """Represents a "connection" to a repository.

    This is the base interface for representing a connection to a repository.
    It holds basic properties and methods applicable to all peer types.

    This is not a complete interface definition and should not be used
    outside of this module.
    """

    ui = interfaceutil.Attribute("""ui.ui instance""")

    def url():
        """Returns a URL string representing this peer.

        Currently, implementations expose the raw URL used to construct the
        instance. It may contain credentials as part of the URL. The
        expectations of the value aren't well-defined and this could lead to
        data leakage.

        TODO audit/clean consumers and more clearly define the contents of this
        value.
        """

    def local():
        """Returns a local repository instance.

        If the peer represents a local repository, returns an object that
        can be used to interface with it. Otherwise returns ``None``.
        """

    def peer():
        """Returns an object conforming to this interface.

        Most implementations will ``return self``.
        """

    def canpush():
        """Returns a boolean indicating if this peer can be pushed to."""

    def close():
        """Close the connection to this peer.

        This is called when the peer will no longer be used. Resources
        associated with the peer should be cleaned up.
        """


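# Illustrative sketch, not part of the interface: how a hypothetical caller
# might prefer direct repository access when a peer is local, per the
# ``local()`` contract above.
def _example_unwrap_peer(some_peer):
    repo = some_peer.local()
    if repo is not None:
        return repo  # local repository instance; use it directly
    return some_peer.peer()  # remote; stick to the peer interface

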
class ipeercapabilities(interfaceutil.Interface):
    """Peer sub-interface related to capabilities."""

    def capable(name):
        """Determine support for a named capability.

        Returns ``False`` if capability not supported.

        Returns ``True`` if boolean capability is supported. Returns a string
        if capability support is non-boolean.

        Capability strings may or may not map to wire protocol capabilities.
        """

    def requirecap(name, purpose):
        """Require a capability to be present.

        Raises a ``CapabilityError`` if the capability isn't present.
        """


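# Illustrative sketch of the three ``capable()`` outcomes documented above;
# ``some_peer`` and the capability names are hypothetical.
def _example_capability_lookup(some_peer):
    assert some_peer.capable(b'nosuchcap') is False  # absent capability
    if some_peer.capable(b'branchmap') is True:
        pass  # boolean capability: present
    value = some_peer.capable(b'bundle2')
    if isinstance(value, bytes):
        return value  # non-boolean capability: the b'bundle2=...' payload

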
class ipeercommands(interfaceutil.Interface):
    """Client-side interface for communicating over the wire protocol.

    This interface is used as a gateway to the Mercurial wire protocol.
    Methods commonly call wire protocol commands of the same name.
    """

    def branchmap():
        """Obtain heads in named branches.

        Returns a dict mapping branch name to an iterable of nodes that are
        heads on that branch.
        """

    def capabilities():
        """Obtain capabilities of the peer.

        Returns a set of string capabilities.
        """

    def clonebundles():
        """Obtains the clone bundles manifest for the repo.

        Returns the manifest as unparsed bytes.
        """

    def debugwireargs(one, two, three=None, four=None, five=None):
        """Used to facilitate debugging of arguments passed over the wire."""

    def getbundle(source, **kwargs):
        """Obtain remote repository data as a bundle.

        This command is how the bulk of repository data is transferred from
        the peer to the local repository.

        Returns a generator of bundle data.
        """

    def heads():
        """Determine all known head revisions in the peer.

        Returns an iterable of binary nodes.
        """

    def known(nodes):
        """Determine whether multiple nodes are known.

        Accepts an iterable of nodes whose presence to check for.

        Returns an iterable of booleans indicating whether the corresponding
        node at that index is known to the peer.
169 """
169 """
170
170
171 def listkeys(namespace):
171 def listkeys(namespace):
172 """Obtain all keys in a pushkey namespace.
172 """Obtain all keys in a pushkey namespace.
173
173
174 Returns an iterable of key names.
174 Returns an iterable of key names.
175 """
175 """
176
176
177 def lookup(key):
177 def lookup(key):
178 """Resolve a value to a known revision.
178 """Resolve a value to a known revision.
179
179
180 Returns a binary node of the resolved revision on success.
180 Returns a binary node of the resolved revision on success.
181 """
181 """
182
182
183 def pushkey(namespace, key, old, new):
183 def pushkey(namespace, key, old, new):
184 """Set a value using the ``pushkey`` protocol.
184 """Set a value using the ``pushkey`` protocol.
185
185
186 Arguments correspond to the pushkey namespace and key to operate on and
186 Arguments correspond to the pushkey namespace and key to operate on and
187 the old and new values for that key.
187 the old and new values for that key.
188
188
189 Returns a string with the peer result. The value inside varies by the
189 Returns a string with the peer result. The value inside varies by the
190 namespace.
190 namespace.
191 """
191 """
192
192
193 def stream_out():
193 def stream_out():
194 """Obtain streaming clone data.
194 """Obtain streaming clone data.
195
195
196 Successful result should be a generator of data chunks.
196 Successful result should be a generator of data chunks.
197 """
197 """
198
198
199 def unbundle(bundle, heads, url):
199 def unbundle(bundle, heads, url):
200 """Transfer repository data to the peer.
200 """Transfer repository data to the peer.
201
201
202 This is how the bulk of data during a push is transferred.
202 This is how the bulk of data during a push is transferred.
203
203
204 Returns the integer number of heads added to the peer.
204 Returns the integer number of heads added to the peer.
205 """
205 """
206
206
207
207
208 class ipeerlegacycommands(interfaceutil.Interface):
208 class ipeerlegacycommands(interfaceutil.Interface):
209 """Interface for implementing support for legacy wire protocol commands.
209 """Interface for implementing support for legacy wire protocol commands.
210
210
211 Wire protocol commands transition to legacy status when they are no longer
211 Wire protocol commands transition to legacy status when they are no longer
212 used by modern clients. To facilitate identifying which commands are
212 used by modern clients. To facilitate identifying which commands are
213 legacy, the interfaces are split.
213 legacy, the interfaces are split.
214 """
214 """
215
215
216 def between(pairs):
216 def between(pairs):
217 """Obtain nodes between pairs of nodes.
217 """Obtain nodes between pairs of nodes.
218
218
219 ``pairs`` is an iterable of node pairs.
219 ``pairs`` is an iterable of node pairs.
220
220
221 Returns an iterable of iterables of nodes corresponding to each
221 Returns an iterable of iterables of nodes corresponding to each
222 requested pair.
222 requested pair.
223 """
223 """
224
224
225 def branches(nodes):
225 def branches(nodes):
226 """Obtain ancestor changesets of specific nodes back to a branch point.
226 """Obtain ancestor changesets of specific nodes back to a branch point.
227
227
228 For each requested node, the peer finds the first ancestor node that is
228 For each requested node, the peer finds the first ancestor node that is
229 a DAG root or is a merge.
229 a DAG root or is a merge.
230
230
231 Returns an iterable of iterables with the resolved values for each node.
231 Returns an iterable of iterables with the resolved values for each node.
232 """
232 """
233
233
234 def changegroup(nodes, source):
234 def changegroup(nodes, source):
235 """Obtain a changegroup with data for descendants of specified nodes."""
235 """Obtain a changegroup with data for descendants of specified nodes."""
236
236
237 def changegroupsubset(bases, heads, source):
237 def changegroupsubset(bases, heads, source):
        """Obtain a changegroup with data for changesets between
        ``bases`` and ``heads``.
        """


class ipeercommandexecutor(interfaceutil.Interface):
    """Represents a mechanism to execute remote commands.

    This is the primary interface for requesting that wire protocol commands
    be executed. Instances of this interface are active in a context manager
    and have a well-defined lifetime. When the context manager exits, all
    outstanding requests are waited on.
    """

    def callcommand(name, args):
        """Request that a named command be executed.

        Receives the command name and a dictionary of command arguments.

        Returns a ``concurrent.futures.Future`` that will resolve to the
        result of that command request. That exact value is left up to
        the implementation and possibly varies by command.

        Not all commands can coexist with other commands in an executor
        instance: it depends on the underlying wire protocol transport being
        used and the command itself.

        Implementations MAY call ``sendcommands()`` automatically if the
        requested command can not coexist with other commands in this executor.

        Implementations MAY call ``sendcommands()`` automatically when the
        future's ``result()`` is called. So, consumers using multiple
        commands with an executor MUST ensure that ``result()`` is not called
        until all command requests have been issued.
        """

    def sendcommands():
        """Trigger submission of queued command requests.

        Not all transports submit commands as soon as they are requested to
        run. When called, this method forces queued command requests to be
        issued. It will no-op if all commands have already been sent.

        When called, no more new commands may be issued with this executor.
        """

    def close():
        """Signal that this command request is finished.

        When called, no more new commands may be issued. All outstanding
        commands that have previously been issued are waited on before
        returning. This not only includes waiting for the futures to resolve,
        but also waiting for all response data to arrive. In other words,
        calling this waits for all on-wire state for issued command requests
        to finish.

        When used as a context manager, this method is called when exiting the
        context manager.

        This method may call ``sendcommands()`` if there are buffered commands.
        """


class ipeerrequests(interfaceutil.Interface):
    """Interface for executing commands on a peer."""

    limitedarguments = interfaceutil.Attribute(
303 """True if the peer cannot receive large argument value for commands."""
303 """True if the peer cannot receive large argument value for commands."""
    )

    def commandexecutor():
        """A context manager that resolves to an ipeercommandexecutor.

        The object this resolves to can be used to issue command requests
        to the peer.

        Callers should call its ``callcommand`` method to issue command
        requests.

        A new executor should be obtained for each distinct set of commands
        (possibly just a single command) that the consumer wants to execute
        as part of a single operation or round trip. This is because some
        peers are half-duplex and/or don't support persistent connections.
        For example, in the case of HTTP peers, commands sent to an executor
        represent a single HTTP request. While some peers may support
        multiple command sends over the wire per executor, consumers need to
        code to the least capable peer. So it should be assumed that command
        executors buffer called commands until they are told to send them
        and that each command executor could result in a new connection or
        wire-level request being issued.
        """


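# Illustrative sketch, not part of the interfaces: one executor per round
# trip, with futures resolved only after all commands are issued, as the
# ``callcommand()`` documentation above requires. ``remote`` is hypothetical.
def _example_one_round_trip(remote):
    with remote.commandexecutor() as e:
        f_heads = e.callcommand(b'heads', {})
        f_keys = e.callcommand(b'listkeys', {b'namespace': b'phases'})
    # Exiting the context manager sends buffered commands and waits on them.
    return f_heads.result(), f_keys.result()

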
class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
    """Unified interface for peer repositories.

    All peer instances must conform to this interface.
    """


class ipeerv2(ipeerconnection, ipeercapabilities, ipeerrequests):
    """Unified peer interface for wire protocol version 2 peers."""

    apidescriptor = interfaceutil.Attribute(
        """Data structure holding description of server API."""
    )


@interfaceutil.implementer(ipeerbase)
class peer(object):
    """Base class for peer repositories."""

    limitedarguments = False

    def capable(self, name):
        caps = self.capabilities()
        if name in caps:
            return True

        name = b'%s=' % name
        for cap in caps:
            if cap.startswith(name):
                return cap[len(name) :]

        return False

    def requirecap(self, name, purpose):
        if self.capable(name):
            return

        raise error.CapabilityError(
            _(
                b'cannot %s; remote repository does not support the '
                b'\'%s\' capability'
            )
            % (purpose, name)
        )


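# Minimal sketch (hypothetical, for illustration only): a subclass needs to
# supply ``capabilities()`` as a set of ``name`` or ``name=value`` strings
# for the inherited ``capable()`` above to work.
class _examplepeer(peer):
    def capabilities(self):
        return {b'branchmap', b'bundle2=HG20'}
    # _examplepeer().capable(b'bundle2') would return b'HG20'.

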
class iverifyproblem(interfaceutil.Interface):
    """Represents a problem with the integrity of the repository.

    Instances of this interface are emitted to describe an integrity issue
    with a repository (e.g. corrupt storage, missing data, etc).

    Instances are essentially messages associated with severity.
    """

    warning = interfaceutil.Attribute(
        """Message indicating a non-fatal problem."""
    )

    error = interfaceutil.Attribute("""Message indicating a fatal problem.""")

    node = interfaceutil.Attribute(
        """Revision encountering the problem.

        ``None`` means the problem doesn't apply to a single revision.
        """
    )


class irevisiondelta(interfaceutil.Interface):
    """Represents a delta between one revision and another.

    Instances convey enough information to allow a revision to be exchanged
    with another repository.

    Instances represent the fulltext revision data or a delta against
    another revision. Therefore the ``revision`` and ``delta`` attributes
    are mutually exclusive.

    Typically used for changegroup generation.
    """

    node = interfaceutil.Attribute("""20 byte node of this revision.""")

    p1node = interfaceutil.Attribute(
        """20 byte node of 1st parent of this revision."""
    )

    p2node = interfaceutil.Attribute(
        """20 byte node of 2nd parent of this revision."""
    )

    linknode = interfaceutil.Attribute(
        """20 byte node of the changelog revision this node is linked to."""
    )

    flags = interfaceutil.Attribute(
        """2 bytes of integer flags that apply to this revision.

        This is a bitwise composition of the ``REVISION_FLAG_*`` constants.
        """
    )

    basenode = interfaceutil.Attribute(
        """20 byte node of the revision this data is a delta against.

        ``nullid`` indicates that the revision is a full revision and not
        a delta.
        """
    )

    baserevisionsize = interfaceutil.Attribute(
        """Size of base revision this delta is against.

        May be ``None`` if ``basenode`` is ``nullid``.
        """
    )

    revision = interfaceutil.Attribute(
        """Raw fulltext of revision data for this node."""
    )

    delta = interfaceutil.Attribute(
        """Delta between ``basenode`` and ``node``.

        Stored in the bdiff delta format.
        """
    )


class ifilerevisionssequence(interfaceutil.Interface):
    """Contains index data for all revisions of a file.

    Types implementing this behave like lists of tuples. The index
    in the list corresponds to the revision number. The values contain
    index metadata.

    The *null* revision (revision number -1) is always the last item
    in the index.
    """

    def __len__():
        """The total number of revisions."""

    def __getitem__(rev):
        """Returns the object having a specific revision number.

        Returns an 8-tuple with the following fields:

        offset+flags
            Contains the offset and flags for the revision. 64-bit unsigned
            integer where first 6 bytes are the offset and the next 2 bytes
            are flags. The offset can be 0 if it is not used by the store.
        compressed size
            Size of the revision data in the store. It can be 0 if it isn't
            needed by the store.
        uncompressed size
            Fulltext size. It can be 0 if it isn't needed by the store.
        base revision
            Revision number of revision the delta for storage is encoded
            against. -1 indicates not encoded against a base revision.
        link revision
            Revision number of changelog revision this entry is related to.
        p1 revision
            Revision number of 1st parent. -1 if no 1st parent.
        p2 revision
            Revision number of 2nd parent. -1 if no 2nd parent.
        node
            Binary node value for this revision number.

        Negative values should index off the end of the sequence. ``-1``
        should return the null revision. ``-2`` should return the most
        recent revision.
        """

    def __contains__(rev):
        """Whether a revision number exists."""

    def insert(i, entry):
508 """Add an item to the index at specific revision."""
508 """Add an item to the index at specific revision."""
509
509
510
510
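# Illustrative sketch of unpacking the 8-tuple described above; ``index``
# is a hypothetical ifilerevisionssequence. The offset occupies the high
# 6 bytes of the first field and the flags the low 2 bytes.
def _example_read_entry(index, rev):
    (offset_flags, comp_size, uncomp_size, base_rev,
     link_rev, p1_rev, p2_rev, node) = index[rev]
    offset = offset_flags >> 16
    flags = offset_flags & 0xFFFF
    return offset, flags, node

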
class ifileindex(interfaceutil.Interface):
    """Storage interface for index data of a single file.

    File storage data is divided into index metadata and data storage.
    This interface defines the index portion of the interface.

    The index logically consists of:

    * A mapping between revision numbers and nodes.
    * DAG data (storing and querying the relationship between nodes).
    * Metadata to facilitate storage.
    """

    def __len__():
        """Obtain the number of revisions stored for this file."""

    def __iter__():
        """Iterate over revision numbers for this file."""

    def hasnode(node):
        """Returns a bool indicating if a node is known to this store.

        Implementations must only return True for full, binary node values:
        hex nodes, revision numbers, and partial node matches must be
        rejected.

        The null node is never present.
        """

    def revs(start=0, stop=None):
        """Iterate over revision numbers for this file, with control."""

    def parents(node):
        """Returns a 2-tuple of parent nodes for a revision.

        Values will be ``nullid`` if the parent is empty.
        """

    def parentrevs(rev):
        """Like parents() but operates on revision numbers."""

    def rev(node):
        """Obtain the revision number given a node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def node(rev):
        """Obtain the node value given a revision number.

        Raises ``IndexError`` if the revision is not known.
562 """
562 """
563
563
564 def lookup(node):
564 def lookup(node):
565 """Attempt to resolve a value to a node.
565 """Attempt to resolve a value to a node.
566
566
567 Value can be a binary node, hex node, revision number, or a string
567 Value can be a binary node, hex node, revision number, or a string
568 that can be converted to an integer.
568 that can be converted to an integer.
569
569
570 Raises ``error.LookupError`` if a node could not be resolved.
570 Raises ``error.LookupError`` if a node could not be resolved.
571 """
571 """
572
572
573 def linkrev(rev):
573 def linkrev(rev):
574 """Obtain the changeset revision number a revision is linked to."""
574 """Obtain the changeset revision number a revision is linked to."""
575
575
576 def iscensored(rev):
576 def iscensored(rev):
577 """Return whether a revision's content has been censored."""
577 """Return whether a revision's content has been censored."""
578
578
579 def commonancestorsheads(node1, node2):
579 def commonancestorsheads(node1, node2):
580 """Obtain an iterable of nodes containing heads of common ancestors.
580 """Obtain an iterable of nodes containing heads of common ancestors.
581
581
582 See ``ancestor.commonancestorsheads()``.
582 See ``ancestor.commonancestorsheads()``.
583 """
583 """
584
584
585 def descendants(revs):
585 def descendants(revs):
586 """Obtain descendant revision numbers for a set of revision numbers.
586 """Obtain descendant revision numbers for a set of revision numbers.
587
587
588 If ``nullrev`` is in the set, this is equivalent to ``revs()``.
588 If ``nullrev`` is in the set, this is equivalent to ``revs()``.
589 """
589 """
590
590
591 def heads(start=None, stop=None):
591 def heads(start=None, stop=None):
592 """Obtain a list of nodes that are DAG heads, with control.
592 """Obtain a list of nodes that are DAG heads, with control.
593
593
594 The set of revisions examined can be limited by specifying
594 The set of revisions examined can be limited by specifying
595 ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
595 ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
596 iterable of nodes. DAG traversal starts at earlier revision
596 iterable of nodes. DAG traversal starts at earlier revision
597 ``start`` and iterates forward until any node in ``stop`` is
597 ``start`` and iterates forward until any node in ``stop`` is
598 encountered.
598 encountered.
599 """
599 """
600
600
601 def children(node):
601 def children(node):
602 """Obtain nodes that are children of a node.
602 """Obtain nodes that are children of a node.
603
603
604 Returns a list of nodes.
604 Returns a list of nodes.
605 """
605 """
606
606
607
607
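# Illustrative sketch (``store`` is a hypothetical ifileindex): the node and
# revision-number mappings are inverses of each other.
def _example_roundtrip(store, node):
    rev = store.rev(node)  # raises error.LookupError if unknown
    assert store.node(rev) == node
    return store.parentrevs(rev)

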
class ifiledata(interfaceutil.Interface):
    """Storage interface for data storage of a specific file.

    This complements ``ifileindex`` and provides an interface for accessing
    data for a tracked file.
    """

    def size(rev):
        """Obtain the fulltext size of file data.

        Any metadata is excluded from size measurements.
        """

    def revision(node, raw=False):
622 """"Obtain fulltext data for a node.
622 """"Obtain fulltext data for a node.

        By default, any storage transformations are applied before the data
        is returned. If ``raw`` is True, non-raw storage transformations
        are not applied.

        The fulltext data may contain a header containing metadata. Most
        consumers should use ``read()`` to obtain the actual file data.
        """

    def rawdata(node):
        """Obtain raw data for a node."""

    def read(node):
        """Resolve file fulltext data.

        This is similar to ``revision()`` except any metadata in the data
        headers is stripped.
        """

    def renamed(node):
        """Obtain copy metadata for a node.

        Returns ``False`` if no copy metadata is stored or a 2-tuple of
        (path, node) from which this revision was copied.
        """

    def cmp(node, fulltext):
        """Compare fulltext to another revision.

        Returns True if the fulltext is different from what is stored.

        This takes copy metadata into account.

        TODO better document the copy metadata and censoring logic.
        """

    def emitrevisions(
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
        deltamode=CG_DELTAMODE_STD,
    ):
        """Produce ``irevisiondelta`` for revisions.

        Given an iterable of nodes, emits objects conforming to the
        ``irevisiondelta`` interface that describe revisions in storage.

        This method is a generator.

        The input nodes may be unordered. Implementations must ensure that a
        node's parents are emitted before the node itself. Transitively, this
        means that a node may only be emitted once all its ancestors in
        ``nodes`` have also been emitted.

        By default, emits "index" data (the ``node``, ``p1node``, and
        ``p2node`` attributes). If ``revisiondata`` is set, revision data
        will also be present on the emitted objects.

        With default argument values, implementations can choose to emit
        either fulltext revision data or a delta. When emitting deltas,
        implementations must consider whether the delta's base revision
        fulltext is available to the receiver.

        The base revision fulltext is guaranteed to be available if any of
        the following are met:

        * Its fulltext revision was emitted by this method call.
        * A delta for that revision was emitted by this method call.
        * ``assumehaveparentrevisions`` is True and the base revision is a
          parent of the node.

        ``nodesorder`` can be used to control the order that revisions are
        emitted. By default, revisions can be reordered as long as they are
        in DAG topological order (see above). If the value is ``nodes``,
        the iteration order from ``nodes`` should be used. If the value is
        ``storage``, then the native order from the backing storage layer
        is used. (Not all storage layers will have strong ordering and behavior
        of this mode is storage-dependent.) ``nodes`` ordering can force
        revisions to be emitted before their ancestors, so consumers should
        use it with care.

        The ``linknode`` attribute on the returned ``irevisiondelta`` may not
        be set and it is the caller's responsibility to resolve it, if needed.

        If ``deltamode`` is CG_DELTAMODE_PREV and revision data is requested,
        all revision data should be emitted as deltas against the revision
        emitted just prior. The initial revision should be a delta against its
        1st parent.
        """


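# Illustrative sketch of consuming ``emitrevisions()``; ``store`` is a
# hypothetical ifiledata. It relies on the documented guarantee that a
# delta base was emitted earlier or is a parent of the node.
def _example_collect(store, nodes):
    out = []
    for rev in store.emitrevisions(
        nodes, revisiondata=True, assumehaveparentrevisions=True
    ):
        if rev.revision is not None:
            out.append((rev.node, rev.revision))  # fulltext
        else:
            out.append((rev.node, rev.basenode, rev.delta))  # bdiff delta
    return out

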
class ifilemutation(interfaceutil.Interface):
    """Storage interface for mutation events of a tracked file."""

    def add(filedata, meta, transaction, linkrev, p1, p2):
        """Add a new revision to the store.

        Takes file data, dictionary of metadata, a transaction, linkrev,
        and parent nodes.

        Returns the node that was added.

        May no-op if a revision matching the supplied data is already stored.
        """

    def addrevision(
        revisiondata,
        transaction,
        linkrev,
        p1,
        p2,
        node=None,
        flags=0,
        cachedelta=None,
    ):
        """Add a new revision to the store.

        This is similar to ``add()`` except it operates at a lower level.

        The data passed in already contains a metadata header, if any.

        ``node`` and ``flags`` can be used to define the expected node and
        the flags to use with storage. ``flags`` is a bitwise value composed
        of the various ``REVISION_FLAG_*`` constants.

        ``add()`` is usually called when adding files from e.g. the working
        directory. ``addrevision()`` is often called by ``add()`` and for
        scenarios where revision data has already been computed, such as when
        applying raw data from a peer repo.
        """

    def addgroup(
        deltas,
        linkmapper,
        transaction,
        addrevisioncb=None,
        maybemissingparents=False,
    ):
        """Process a series of deltas for storage.

        ``deltas`` is an iterable of 7-tuples of
        (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
        to add.

        The ``delta`` field contains ``mpatch`` data to apply to a base
        revision, identified by ``deltabase``. The base node can be
        ``nullid``, in which case the header from the delta can be ignored
        and the delta used as the fulltext.

        ``addrevisioncb`` should be called for each node as it is committed.

        ``maybemissingparents`` is a bool indicating whether the incoming
        data may reference parents/ancestor revisions that aren't present.
        This flag is set when receiving data into a "shallow" store that
        doesn't hold all history.

        Returns a list of nodes that were processed. A node will be in the list
        even if it existed in the store previously.
        """

    def censorrevision(tr, node, tombstone=b''):
        """Remove the content of a single revision.

        The specified ``node`` will have its content purged from storage.
        Future attempts to access the revision data for this node will
        result in failure.

        A ``tombstone`` message can optionally be stored. This message may be
        displayed to users when they attempt to access the missing revision
        data.

        Storage backends may have stored deltas against the previous content
        in this revision. As part of censoring a revision, these storage
        backends are expected to rewrite any internally stored deltas such
        that they no longer reference the deleted content.
        """

    def getstrippoint(minlink):
        """Find the minimum revision that must be stripped to strip a linkrev.

        Returns a 2-tuple containing the minimum revision number and a set
        of all revision numbers that would be broken by this strip.

        TODO this is highly revlog centric and should be abstracted into
        a higher-level deletion API. ``repair.strip()`` relies on this.
        """

    def strip(minlink, transaction):
        """Remove storage of items starting at a linkrev.

        This uses ``getstrippoint()`` to determine the first node to remove.
        Then it effectively truncates storage for all revisions after that.

        TODO this is highly revlog centric and should be abstracted into a
        higher-level deletion API.
        """


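# Illustrative sketch of feeding ``addgroup()`` a single delta; every value
# here is a hypothetical placeholder. ``flags`` of 0 means no special
# REVISION_FLAG_* treatment.
def _example_addgroup(store, linkmapper, tr, node, p1, p2, linknode,
                      deltabase, delta):
    deltas = [(node, p1, p2, linknode, deltabase, delta, 0)]
    return store.addgroup(deltas, linkmapper, tr)

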
class ifilestorage(ifileindex, ifiledata, ifilemutation):
    """Complete storage interface for a single tracked file."""

    def files():
        """Obtain paths that are backing storage for this file.

        TODO this is used heavily by verify code and there should probably
        be a better API for that.
        """

    def storageinfo(
        exclusivefiles=False,
        sharedfiles=False,
        revisionscount=False,
        trackedsize=False,
        storedsize=False,
    ):
        """Obtain information about storage for this file's data.

        Returns a dict describing storage for this tracked path. The keys
        in the dict map to arguments of the same name. The arguments are bools
        indicating whether to calculate and obtain that data.

        exclusivefiles
            Iterable of (vfs, path) describing files that are exclusively
            used to back storage for this tracked path.

        sharedfiles
            Iterable of (vfs, path) describing files that are used to back
            storage for this tracked path. Those files may also provide storage
            for other stored entities.

        revisionscount
            Number of revisions available for retrieval.

        trackedsize
            Total size in bytes of all tracked revisions. This is a sum of the
            length of the fulltext of all revisions.

        storedsize
            Total size in bytes used to store data for all tracked revisions.
            This is commonly less than ``trackedsize`` due to internal usage
            of deltas rather than fulltext revisions.

        Not all storage backends may support all queries or have a reasonable
868 value to use. In that case, the value should be set to ``None`` and
868 value to use. In that case, the value should be set to ``None`` and
869 callers are expected to handle this special value.
869 callers are expected to handle this special value.
870 """
870 """
871
871
872 def verifyintegrity(state):
872 def verifyintegrity(state):
873 """Verifies the integrity of file storage.
873 """Verifies the integrity of file storage.
874
874
875 ``state`` is a dict holding state of the verifier process. It can be
875 ``state`` is a dict holding state of the verifier process. It can be
876 used to communicate data between invocations of multiple storage
876 used to communicate data between invocations of multiple storage
877 primitives.
877 primitives.
878
878
879 If individual revisions cannot have their revision content resolved,
879 If individual revisions cannot have their revision content resolved,
880 the method is expected to set the ``skipread`` key to a set of nodes
880 the method is expected to set the ``skipread`` key to a set of nodes
881 that encountered problems. If set, the method can also add the node(s)
881 that encountered problems. If set, the method can also add the node(s)
882 to ``safe_renamed`` in order to indicate nodes that may perform the
882 to ``safe_renamed`` in order to indicate nodes that may perform the
883 rename checks with currently accessible data.
883 rename checks with currently accessible data.
884
884
885 The method yields objects conforming to the ``iverifyproblem``
885 The method yields objects conforming to the ``iverifyproblem``
886 interface.
886 interface.
887 """
887 """
888
888
889
889
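# Illustrative sketch (not part of the interface definitions): one way a
# consumer might drive ``storageinfo()``. ``fl`` stands for any object
# conforming to ``ifilestorage``; the bytes dict keys mirroring the argument
# names follow Mercurial convention and are an assumption here. Backends may
# answer ``None`` for queries they cannot serve, so values are guarded.
def _example_storageinfo_usage(fl):
    info = fl.storageinfo(
        revisionscount=True, trackedsize=True, storedsize=True
    )
    revcount = info[b'revisionscount']
    tracked = info[b'trackedsize']
    stored = info[b'storedsize']
    if tracked and stored is not None:
        # Rough ratio of on-disk size to logical size (delta efficiency).
        return revcount, float(stored) / float(tracked)
    return revcount, None

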
class idirs(interfaceutil.Interface):
    """Interface representing a collection of directories from paths.

    This interface is essentially a derived data structure representing
    directories from a collection of paths.
    """

    def addpath(path):
        """Add a path to the collection.

        All directories in the path will be added to the collection.
        """

    def delpath(path):
        """Remove a path from the collection.

        If the removal was the last path in a particular directory, the
        directory is removed from the collection.
        """

    def __iter__():
        """Iterate over the directories in this collection of paths."""

    def __contains__(path):
        """Whether a specific directory is in this collection."""


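# Hypothetical walkthrough of the ``idirs`` semantics described above: a
# directory stays in the collection while at least one added path still
# lives beneath it. ``dirs`` is any conforming object.
def _example_idirs_behavior(dirs):
    dirs.addpath(b'a/b/c.txt')  # registers directories b'a' and b'a/b'
    dirs.addpath(b'a/d.txt')
    assert b'a/b' in dirs
    dirs.delpath(b'a/b/c.txt')  # last path under b'a/b', so it is dropped
    assert b'a/b' not in dirs
    assert b'a' in dirs  # still backed by b'a/d.txt'

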
class imanifestdict(interfaceutil.Interface):
    """Interface representing a manifest data structure.

    A manifest is effectively a dict mapping paths to entries. Each entry
    consists of a binary node and extra flags affecting that entry.
    """

    def __getitem__(path):
        """Returns the binary node value for a path in the manifest.

        Raises ``KeyError`` if the path does not exist in the manifest.

        Equivalent to ``self.find(path)[0]``.
        """

    def find(path):
        """Returns the entry for a path in the manifest.

        Returns a 2-tuple of (node, flags).

        Raises ``KeyError`` if the path does not exist in the manifest.
        """

    def __len__():
        """Return the number of entries in the manifest."""

    def __nonzero__():
        """Returns True if the manifest has entries, False otherwise."""

    __bool__ = __nonzero__

    def __setitem__(path, node):
        """Define the node value for a path in the manifest.

        If the path is already in the manifest, its flags will be copied to
        the new entry.
        """

    def __contains__(path):
        """Whether a path exists in the manifest."""

    def __delitem__(path):
        """Remove a path from the manifest.

        Raises ``KeyError`` if the path is not in the manifest.
        """

    def __iter__():
        """Iterate over paths in the manifest."""

    def iterkeys():
        """Iterate over paths in the manifest."""

    def keys():
        """Obtain a list of paths in the manifest."""

    def filesnotin(other, match=None):
        """Obtain the set of paths in this manifest but not in another.

        ``match`` is an optional matcher function to be applied to both
        manifests.

        Returns a set of paths.
        """

    def dirs():
        """Returns an object implementing the ``idirs`` interface."""

    def hasdir(dir):
        """Returns a bool indicating if a directory is in this manifest."""

    def walk(match):
        """Generator of paths in manifest satisfying a matcher.

        If the matcher has explicit files listed and they don't exist in
        the manifest, ``match.bad()`` is called for each missing file.
        """

    def diff(other, match=None, clean=False):
        """Find differences between this manifest and another.

        This manifest is compared to ``other``.

        If ``match`` is provided, the two manifests are filtered against this
        matcher and only entries satisfying the matcher are compared.

        If ``clean`` is True, unchanged files are included in the returned
        object.

        Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
        the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
        represents the node and flags for this manifest and ``(node2, flag2)``
        are the same for the other manifest.
        """

    def setflag(path, flag):
        """Set the flag value for a given path.

        Raises ``KeyError`` if the path is not already in the manifest.
        """

    def get(path, default=None):
        """Obtain the node value for a path or a default value if missing."""

    def flags(path):
        """Return the flags value for a path (default: empty bytestring)."""

    def copy():
        """Return a copy of this manifest."""

    def items():
        """Returns an iterable of (path, node) for items in this manifest."""

    def iteritems():
        """Identical to items()."""

    def iterentries():
        """Returns an iterable of (path, node, flags) for this manifest.

        Similar to ``iteritems()`` except items are a 3-tuple and include
        flags.
        """

    def text():
        """Obtain the raw data representation for this manifest.

        Result is used to create a manifest revision.
        """

    def fastdelta(base, changes):
        """Obtain a delta between this manifest and another given changes.

        ``base`` is the raw data representation for another manifest.

        ``changes`` is an iterable of ``(path, to_delete)``.

        Returns a 2-tuple containing ``bytearray(self.text())`` and the
        delta between ``base`` and this manifest.

        If this manifest implementation can't support ``fastdelta()``,
        raise ``mercurial.manifest.FastdeltaUnavailable``.
        """


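# Sketch of consuming ``imanifestdict.diff()``. ``m1`` and ``m2`` are any
# two conforming manifests. Representing a missing side with a ``None``
# node is an assumption here, matching Mercurial's in-tree implementation.
def _example_manifest_diff(m1, m2):
    summary = {}
    for path, ((n1, fl1), (n2, fl2)) in m1.diff(m2).items():
        if n1 is None:
            summary[path] = b'only in other'  # assumption: None == absent
        elif n2 is None:
            summary[path] = b'only in this manifest'
        else:
            summary[path] = b'differs'
    return summary

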
class imanifestrevisionbase(interfaceutil.Interface):
    """Base interface representing a single revision of a manifest.

    Should not be used as a primary interface: should always be inherited
    as part of a larger interface.
    """

    def copy():
        """Obtain a copy of this manifest instance.

        Returns an object conforming to the ``imanifestrevisionwritable``
        interface. The instance will be associated with the same
        ``imanifestlog`` collection as this instance.
        """

    def read():
        """Obtain the parsed manifest data structure.

        The returned object conforms to the ``imanifestdict`` interface.
        """


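# Sketch of the copy-then-edit pattern implied by ``copy()`` and ``read()``:
# mutate the parsed manifest of a writable copy without touching the
# original revision. All names here are illustrative.
def _example_copy_and_edit(mctx, path, newnode):
    writable = mctx.copy()  # conforms to imanifestrevisionwritable
    m = writable.read()  # conforms to imanifestdict
    m[path] = newnode  # existing flags for the path are carried over
    return writable

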
class imanifestrevisionstored(imanifestrevisionbase):
    """Interface representing a manifest revision committed to storage."""

    def node():
        """The binary node for this manifest."""

    parents = interfaceutil.Attribute(
        """List of binary nodes that are parents for this manifest revision."""
    )

    def readdelta(shallow=False):
        """Obtain the manifest data structure representing changes from parent.

        This manifest is compared to its 1st parent. A new manifest
        representing those differences is constructed.

        The returned object conforms to the ``imanifestdict`` interface.
        """

    def readfast(shallow=False):
        """Calls either ``read()`` or ``readdelta()``.

        The faster of the two options is called.
        """

    def find(key):
        """Calls ``self.read().find(key)``.

        Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
        """


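# Sketch contrasting the read variants on a stored manifest revision:
# ``read()`` yields every entry, ``readdelta()`` only the changes against
# the first parent, and ``readfast()`` picks whichever is cheaper for the
# underlying storage.
def _example_read_variants(mctx):
    full = mctx.read()
    delta = mctx.readdelta()
    fast = mctx.readfast()
    return len(full), len(delta), len(fast)

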
class imanifestrevisionwritable(imanifestrevisionbase):
    """Interface representing a manifest revision that can be committed."""

    def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
        """Add this revision to storage.

        Takes a transaction object, the changeset revision number it will
        be associated with, its parent nodes, and lists of added and
        removed paths.

        If match is provided, storage can choose not to inspect or write out
        items that do not match. Storage is still required to be able to provide
        the full manifest in the future for any directories written (these
        manifests should not be "narrowed on disk").

        Returns the binary node of the created revision.
        """


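# Sketch of committing a writable manifest revision. The transaction,
# linkrev, and parent nodes come from the surrounding commit machinery and
# are assumptions here.
def _example_write_manifest(writable, tr, linkrev, p1node, p2node, added, removed):
    newnode = writable.write(tr, linkrev, p1node, p2node, added, removed)
    return newnode  # binary node of the freshly stored revision

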
class imanifeststorage(interfaceutil.Interface):
    """Storage interface for manifest data."""

    tree = interfaceutil.Attribute(
        """The path to the directory this manifest tracks.

        The empty bytestring represents the root manifest.
        """
    )

    index = interfaceutil.Attribute(
        """An ``ifilerevisionssequence`` instance."""
    )

    indexfile = interfaceutil.Attribute(
        """Path of revlog index file.

        TODO this is revlog specific and should not be exposed.
        """
    )

    opener = interfaceutil.Attribute(
        """VFS opener to use to access underlying files used for storage.

        TODO this is revlog specific and should not be exposed.
        """
    )

    version = interfaceutil.Attribute(
        """Revlog version number.

        TODO this is revlog specific and should not be exposed.
        """
    )

    _generaldelta = interfaceutil.Attribute(
        """Whether generaldelta storage is being used.

        TODO this is revlog specific and should not be exposed.
        """
    )

    fulltextcache = interfaceutil.Attribute(
        """Dict with cache of fulltexts.

        TODO this doesn't feel appropriate for the storage interface.
        """
    )

    def __len__():
        """Obtain the number of revisions stored for this manifest."""

    def __iter__():
        """Iterate over revision numbers for this manifest."""

    def rev(node):
        """Obtain the revision number given a binary node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def node(rev):
        """Obtain the node value given a revision number.

        Raises ``error.LookupError`` if the revision is not known.
        """

    def lookup(value):
        """Attempt to resolve a value to a node.

        Value can be a binary node, hex node, revision number, or a bytes
        that can be converted to an integer.

        Raises ``error.LookupError`` if a node could not be resolved.
        """

    def parents(node):
        """Returns a 2-tuple of parent nodes for a node.

        Values will be ``nullid`` if the parent is empty.
        """

    def parentrevs(rev):
        """Like parents() but operates on revision numbers."""

    def linkrev(rev):
        """Obtain the changeset revision number a revision is linked to."""

    def revision(node, _df=None, raw=False):
        """Obtain fulltext data for a node."""

    def rawdata(node, _df=None):
        """Obtain raw data for a node."""

    def revdiff(rev1, rev2):
        """Obtain a delta between two revision numbers.

        The returned data is the result of ``bdiff.bdiff()`` on the raw
        revision data.
        """

    def cmp(node, fulltext):
        """Compare fulltext to another revision.

        Returns True if the fulltext is different from what is stored.
        """

    def emitrevisions(
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
    ):
        """Produce ``irevisiondelta`` describing revisions.

        See the documentation for ``ifiledata`` for more.
        """

    def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
        """Process a series of deltas for storage.

        See the documentation in ``ifilemutation`` for more.
        """

    def rawsize(rev):
        """Obtain the size of tracked data.

        Is equivalent to ``len(m.rawdata(node))``.

        TODO this method is only used by upgrade code and may be removed.
        """

    def getstrippoint(minlink):
        """Find minimum revision that must be stripped to strip a linkrev.

        See the documentation in ``ifilemutation`` for more.
        """

    def strip(minlink, transaction):
        """Remove storage of items starting at a linkrev.

        See the documentation in ``ifilemutation`` for more.
        """

    def checksize():
        """Obtain the expected sizes of backing files.

        TODO this is used by verify and it should not be part of the interface.
        """

    def files():
        """Obtain paths that are backing storage for this manifest.

        TODO this is used by verify and there should probably be a better API
        for this functionality.
        """

    def deltaparent(rev):
        """Obtain the revision that a revision is delta'd against.

        TODO delta encoding is an implementation detail of storage and should
        not be exposed to the storage interface.
        """

    def clone(tr, dest, **kwargs):
        """Clone this instance to another."""

    def clearcaches(clear_persisted_data=False):
        """Clear any caches associated with this instance."""

    def dirlog(d):
        """Obtain a manifest storage instance for a tree."""

    def add(
        m, transaction, link, p1, p2, added, removed, readtree=None, match=None
    ):
        """Add a revision to storage.

        ``m`` is an object conforming to ``imanifestdict``.

        ``link`` is the linkrev revision number.

        ``p1`` and ``p2`` are the parent revision numbers.

        ``added`` and ``removed`` are iterables of added and removed paths,
        respectively.

        ``readtree`` is a function that can be used to read the child tree(s)
        when recursively writing the full tree structure when using
        treemanifests.

        ``match`` is a matcher that can be used to hint to storage that not all
        paths must be inspected; this is an optimization and can be safely
        ignored. Note that the storage must still be able to reproduce a full
        manifest including files that did not match.
        """

    def storageinfo(
        exclusivefiles=False,
        sharedfiles=False,
        revisionscount=False,
        trackedsize=False,
        storedsize=False,
    ):
        """Obtain information about storage for this manifest's data.

        See ``ifilestorage.storageinfo()`` for a description of this method.
        This one behaves the same way, except for manifest data.
        """


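# Sketch of node/revision round trips against ``imanifeststorage``; both
# directions raise ``error.LookupError`` for unknown inputs, and ``nullid``
# stands in for an absent parent.
def _example_node_roundtrip(store, node):
    rev = store.rev(node)
    assert store.node(rev) == node
    p1, p2 = store.parents(node)
    return rev, p1, p2

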
class imanifestlog(interfaceutil.Interface):
    """Interface representing a collection of manifest snapshots.

    Represents the root manifest in a repository.

    Also serves as a means to access nested tree manifests and to cache
    tree manifests.
    """

    def __getitem__(node):
        """Obtain a manifest instance for a given binary node.

        Equivalent to calling ``self.get('', node)``.

        The returned object conforms to the ``imanifestrevisionstored``
        interface.
        """

    def get(tree, node, verify=True):
        """Retrieve the manifest instance for a given directory and binary node.

        ``node`` always refers to the node of the root manifest (which will be
        the only manifest if flat manifests are being used).

        If ``tree`` is the empty string, the root manifest is returned.
        Otherwise the manifest for the specified directory will be returned
        (requires tree manifests).

        If ``verify`` is True, ``LookupError`` is raised if the node is not
        known.

        The returned object conforms to the ``imanifestrevisionstored``
        interface.
        """

    def getstorage(tree):
        """Retrieve an interface to storage for a particular tree.

        If ``tree`` is the empty bytestring, storage for the root manifest will
        be returned. Otherwise storage for a tree manifest is returned.

        TODO formalize interface for returned object.
        """

    def clearcaches():
        """Clear caches associated with this collection."""

    def rev(node):
        """Obtain the revision number for a binary node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def update_caches(transaction):
        """Update whatever caches are relevant for the used storage."""


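# Sketch of resolving manifests through ``imanifestlog``. The subscript form
# is shorthand for ``get(b'', node)``; the tree path below is illustrative
# only (its exact spelling is an assumption) and resolves only on
# repositories using tree manifests.
def _example_manifestlog_access(mlog, node):
    rootctx = mlog[node]  # root manifest revision
    treectx = mlog.get(b'some/dir/', node)  # nested tree manifest
    return rootctx.read(), treectx.read()

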
class ilocalrepositoryfilestorage(interfaceutil.Interface):
    """Local repository sub-interface providing access to tracked file storage.

    This interface defines how a repository accesses storage for a single
    tracked file path.
    """

    def file(f):
        """Obtain a filelog for a tracked path.

        The returned type conforms to the ``ifilestorage`` interface.
        """


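# Sketch tying the two file-storage layers together: ``file()`` hands back
# an ``ifilestorage`` whose ``revision()`` yields the fulltext for one file
# node. ``path`` and ``filenode`` are caller-supplied.
def _example_file_fulltext(repo, path, filenode):
    fl = repo.file(path)
    return fl.revision(filenode)

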
class ilocalrepositorymain(interfaceutil.Interface):
    """Main interface for local repositories.

    This currently captures the reality of things - not how things should be.
    """

    supportedformats = interfaceutil.Attribute(
        """Set of requirements that apply to stream clone.

        This is actually a class attribute and is shared among all instances.
        """
    )

    supported = interfaceutil.Attribute(
        """Set of requirements that this repo is capable of opening."""
    )

    requirements = interfaceutil.Attribute(
        """Set of requirements this repo uses."""
    )

    features = interfaceutil.Attribute(
        """Set of "features" this repository supports.

        A "feature" is a loosely-defined term. It can refer to a feature
        in the classical sense or can describe an implementation detail
        of the repository. For example, a ``readonly`` feature may denote
        the repository as read-only. Or a ``revlogfilestore`` feature may
        denote that the repository is using revlogs for file storage.

        The intent of features is to provide a machine-queryable mechanism
        for repo consumers to test for various repository characteristics.

        Features are similar to ``requirements``. The main difference is that
        requirements are stored on-disk and represent requirements to open the
        repository. Features are more run-time capabilities of the repository
        and more granular capabilities (which may be derived from requirements).
        """
    )

    filtername = interfaceutil.Attribute(
        """Name of the repoview that is active on this repo."""
    )

    wvfs = interfaceutil.Attribute(
        """VFS used to access the working directory."""
    )

    vfs = interfaceutil.Attribute(
        """VFS rooted at the .hg directory.

        Used to access repository data not in the store.
        """
    )

    svfs = interfaceutil.Attribute(
        """VFS rooted at the store.

        Used to access repository data in the store. Typically .hg/store.
        But can point elsewhere if the store is shared.
        """
    )

    root = interfaceutil.Attribute(
        """Path to the root of the working directory."""
    )

    path = interfaceutil.Attribute("""Path to the .hg directory.""")

    origroot = interfaceutil.Attribute(
        """The filesystem path that was used to construct the repo."""
    )

    auditor = interfaceutil.Attribute(
        """A pathauditor for the working directory.

        This checks if a path refers to a nested repository.

        Operates on the filesystem.
        """
    )

    nofsauditor = interfaceutil.Attribute(
        """A pathauditor for the working directory.

        This is like ``auditor`` except it doesn't do filesystem checks.
        """
    )

    baseui = interfaceutil.Attribute(
        """Original ui instance passed into constructor."""
    )

    ui = interfaceutil.Attribute("""Main ui instance for this repository.""")

    sharedpath = interfaceutil.Attribute(
        """Path to the .hg directory of the repo this repo was shared from."""
    )

    store = interfaceutil.Attribute("""A store instance.""")

    spath = interfaceutil.Attribute("""Path to the store.""")

    sjoin = interfaceutil.Attribute("""Alias to self.store.join.""")

    cachevfs = interfaceutil.Attribute(
        """A VFS used to access the cache directory.

        Typically .hg/cache.
        """
    )

    wcachevfs = interfaceutil.Attribute(
        """A VFS used to access the cache directory dedicated to the working copy.

        Typically .hg/wcache.
        """
    )

    filteredrevcache = interfaceutil.Attribute(
        """Holds sets of revisions to be filtered."""
    )

    names = interfaceutil.Attribute("""A ``namespaces`` instance.""")

    filecopiesmode = interfaceutil.Attribute(
        """The way file copies should be dealt with in this repo."""
    )

    def close():
        """Close the handle on this repository."""

    def peer():
        """Obtain an object conforming to the ``peer`` interface."""

    def unfiltered():
        """Obtain an unfiltered/raw view of this repo."""

    def filtered(name, visibilityexceptions=None):
        """Obtain a named view of this repository."""

    obsstore = interfaceutil.Attribute("""A store of obsolescence data.""")

    changelog = interfaceutil.Attribute("""A handle on the changelog revlog.""")

    manifestlog = interfaceutil.Attribute(
        """An instance conforming to the ``imanifestlog`` interface.

        Provides access to manifests for the repository.
        """
    )

    dirstate = interfaceutil.Attribute("""Working directory state.""")

    narrowpats = interfaceutil.Attribute(
        """Matcher patterns for this repository's narrowspec."""
    )

    def narrowmatch(match=None, includeexact=False):
        """Obtain a matcher for the narrowspec."""

    def setnarrowpats(newincludes, newexcludes):
        """Define the narrowspec for this repository."""

    def __getitem__(changeid):
        """Try to resolve a changectx."""

    def __contains__(changeid):
        """Whether a changeset exists."""

    def __nonzero__():
        """Always returns True."""
        return True

    __bool__ = __nonzero__

    def __len__():
        """Returns the number of changesets in the repo."""

    def __iter__():
        """Iterate over revisions in the changelog."""

    def revs(expr, *args):
        """Evaluate a revset.

        Emits revisions.
        """

    def set(expr, *args):
        """Evaluate a revset.

        Emits changectx instances.
        """

    def anyrevs(specs, user=False, localalias=None):
        """Find revisions matching one of the given revsets."""

    def url():
        """Returns a string representing the location of this repo."""

    def hook(name, throw=False, **args):
        """Call a hook."""

    def tags():
        """Return a mapping of tag to node."""

    def tagtype(tagname):
        """Return the type of a given tag."""

    def tagslist():
        """Return a list of tags ordered by revision."""

    def nodetags(node):
        """Return the tags associated with a node."""

    def nodebookmarks(node):
        """Return the list of bookmarks pointing to the specified node."""

    def branchmap():
        """Return a mapping of branch to heads in that branch."""

    def revbranchcache():
        pass

    def branchtip(branchtip, ignoremissing=False):
        """Return the tip node for a given branch."""

    def lookup(key):
        """Resolve the node for a revision."""

    def lookupbranch(key):
        """Look up the branch name of the given revision or branch name."""

    def known(nodes):
        """Determine whether a series of nodes is known.

        Returns a list of bools.
        """

    def local():
        """Whether the repository is local."""
        return True

    def publishing():
        """Whether the repository is a publishing repository."""

    def cancopy():
        pass

    def shared():
        """The type of shared repository or None."""

    def wjoin(f, *insidef):
        """Calls self.vfs.reljoin(self.root, f, *insidef)"""

    def setparents(p1, p2):
        """Set the parent nodes of the working directory."""

    def filectx(path, changeid=None, fileid=None):
        """Obtain a filectx for the given file revision."""

    def getcwd():
        """Obtain the current working directory from the dirstate."""

    def pathto(f, cwd=None):
        """Obtain the relative path to a file."""

    def adddatafilter(name, fltr):
        pass

    def wread(filename):
        """Read a file from wvfs, using data filters."""

    def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
        """Write data to a file in the wvfs, using data filters."""

    def wwritedata(filename, data):
        """Resolve data for writing to the wvfs, using data filters."""

    def currenttransaction():
        """Obtain the current transaction instance or None."""

    def transaction(desc, report=None):
        """Open a new transaction to write to the repository."""

    def undofiles():
        """Returns a list of (vfs, path) for files to undo transactions."""

    def recover():
        """Roll back an interrupted transaction."""

    def rollback(dryrun=False, force=False):
        """Undo the last transaction.

        DANGEROUS.
        """

    def updatecaches(tr=None, full=False):
        """Warm repo caches."""

    def invalidatecaches():
        """Invalidate cached data due to the repository mutating."""

    def invalidatevolatilesets():
        pass

    def invalidatedirstate():
        """Invalidate the dirstate."""

    def invalidate(clearfilecache=False):
        pass

    def invalidateall():
        pass

    def lock(wait=True):
        """Lock the repository store and return a lock instance."""

    def wlock(wait=True):
        """Lock the non-store parts of the repository."""

    def currentwlock():
        """Return the wlock if it's held or None."""

    def checkcommitpatterns(wctx, match, status, fail):
        pass

    def commit(
        text=b'',
        user=None,
        date=None,
        match=None,
        force=False,
        editor=False,
        extra=None,
    ):
        """Add a new revision to the repository."""

    def commitctx(ctx, error=False, origctx=None):
        """Commit a commitctx instance to the repository."""

    def destroying():
        """Inform the repository that nodes are about to be destroyed."""

    def destroyed():
        """Inform the repository that nodes have been destroyed."""

    def status(
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        """Convenience method to call repo[x].status()."""

    def addpostdsstatus(ps):
        pass

    def postdsstatus():
        pass

    def clearpostdsstatus():
        pass

    def heads(start=None):
        """Obtain list of nodes that are DAG heads."""

    def branchheads(branch=None, start=None, closed=False):
        pass

    def branches(nodes):
        pass

    def between(pairs):
        pass

    def checkpush(pushop):
        pass

    prepushoutgoinghooks = interfaceutil.Attribute("""util.hooks instance.""")

    def pushkey(namespace, key, old, new):
        pass

    def listkeys(namespace):
        pass

    def debugwireargs(one, two, three=None, four=None, five=None):
        pass

    def savecommitmessage(text):
        pass


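# Sketch of a few read-side conveniences on the main interface; the revset
# string below is illustrative.
def _example_repo_queries(repo):
    tip = repo[b'tip']  # changectx lookup via __getitem__
    heads = list(repo.revs(b'head()'))  # revset evaluation emitting revisions
    return tip, heads, len(repo)  # len() counts changesets

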
class completelocalrepository(
    ilocalrepositorymain, ilocalrepositoryfilestorage
):
    """Complete interface for a local repository."""


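# Sketch of declaring conformance with the complete interface. The
# zope.interface-style ``implementer`` re-exported by ``interfaceutil``
# records the declaration without verifying members at class-creation time,
# so this skeleton imports cleanly; a real implementation must provide every
# attribute and method defined above.
@interfaceutil.implementer(completelocalrepository)
class _examplerepository(object):
    """Skeleton only, for illustration."""

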
1816 class iwireprotocolcommandcacher(interfaceutil.Interface):
1819 class iwireprotocolcommandcacher(interfaceutil.Interface):
1817 """Represents a caching backend for wire protocol commands.
1820 """Represents a caching backend for wire protocol commands.
1818
1821
1819 Wire protocol version 2 supports transparent caching of many commands.
1822 Wire protocol version 2 supports transparent caching of many commands.
1820 To leverage this caching, servers can activate objects that cache
1823 To leverage this caching, servers can activate objects that cache
1821 command responses. Objects handle both cache writing and reading.
1824 command responses. Objects handle both cache writing and reading.
1822 This interface defines how that response caching mechanism works.
1825 This interface defines how that response caching mechanism works.
1823
1826
1824 Wire protocol version 2 commands emit a series of objects that are
1827 Wire protocol version 2 commands emit a series of objects that are
1825 serialized and sent to the client. The caching layer exists between
1828 serialized and sent to the client. The caching layer exists between
1826 the invocation of the command function and the sending of its output
1829 the invocation of the command function and the sending of its output
1827 objects to an output layer.
1830 objects to an output layer.
1828
1831
1829 Instances of this interface represent a binding to a cache that
1832 Instances of this interface represent a binding to a cache that
1830 can serve a response (in place of calling a command function) and/or
1833 can serve a response (in place of calling a command function) and/or
1831 write responses to a cache for subsequent use.
1834 write responses to a cache for subsequent use.
1832
1835
1833 When a command request arrives, the following happens with regards
1836 When a command request arrives, the following happens with regards
1834 to this interface:
1837 to this interface:
1835
1838
1836 1. The server determines whether the command request is cacheable.
1839 1. The server determines whether the command request is cacheable.
1837 2. If it is, an instance of this interface is spawned.
1840 2. If it is, an instance of this interface is spawned.
1838 3. The cacher is activated in a context manager (``__enter__`` is called).
1841 3. The cacher is activated in a context manager (``__enter__`` is called).
1839 4. A cache *key* for that request is derived. This will call the
1842 4. A cache *key* for that request is derived. This will call the
1840 instance's ``adjustcachekeystate()`` method so the derivation
1843 instance's ``adjustcachekeystate()`` method so the derivation
1841 can be influenced.
1844 can be influenced.
1842 5. The cacher is informed of the derived cache key via a call to
1845 5. The cacher is informed of the derived cache key via a call to
1843 ``setcachekey()``.
1846 ``setcachekey()``.
1844 6. The cacher's ``lookup()`` method is called to test for presence of
1847 6. The cacher's ``lookup()`` method is called to test for presence of
1845 the derived key in the cache.
1848 the derived key in the cache.
1846 7. If ``lookup()`` returns a hit, that cached result is used in place
1849 7. If ``lookup()`` returns a hit, that cached result is used in place
1847 of invoking the command function. ``__exit__`` is called and the instance
1850 of invoking the command function. ``__exit__`` is called and the instance
1848 is discarded.
1851 is discarded.
1849 8. The command function is invoked.
1852 8. The command function is invoked.
1850 9. ``onobject()`` is called for each object emitted by the command
1853 9. ``onobject()`` is called for each object emitted by the command
1851 function.
1854 function.
1852 10. After the final object is seen, ``onfinished()`` is called.
1855 10. After the final object is seen, ``onfinished()`` is called.
1853 11. ``__exit__`` is called to signal the end of use of the instance.
1856 11. ``__exit__`` is called to signal the end of use of the instance.
1854
1857
1855 Cache *key* derivation can be influenced by the instance.
1858 Cache *key* derivation can be influenced by the instance.
1856
1859
1857 Cache keys are initially derived from a deterministic representation of
1860 Cache keys are initially derived from a deterministic representation of
1858 the command request. This includes the command name, arguments, protocol
1861 the command request. This includes the command name, arguments, protocol
1859 version, etc. This initial key derivation is performed by CBOR-encoding a
1862 version, etc. This initial key derivation is performed by CBOR-encoding a
1860 data structure and feeding that output into a hasher.
1863 data structure and feeding that output into a hasher.
1861
1864
1862 Instances of this interface can influence this initial key derivation
1865 Instances of this interface can influence this initial key derivation
1863 via ``adjustcachekeystate()``.
1866 via ``adjustcachekeystate()``.
1864
1867
1865 The instance is informed of the derived cache key via a call to
1868 The instance is informed of the derived cache key via a call to
1866 ``setcachekey()``. The instance must store the key locally so it can
1869 ``setcachekey()``. The instance must store the key locally so it can
1867 be consulted on subsequent operations that may require it.
1870 be consulted on subsequent operations that may require it.
1868
1871
1869 When constructed, the instance has access to a callable that can be used
1872 When constructed, the instance has access to a callable that can be used
1870 for encoding response objects. This callable receives as its single
1873 for encoding response objects. This callable receives as its single
1871 argument an object emitted by a command function. It returns an iterable
1874 argument an object emitted by a command function. It returns an iterable
1872 of bytes chunks representing the encoded object. Unless the cacher is
1875 of bytes chunks representing the encoded object. Unless the cacher is
1873 caching native Python objects in memory or has a way of reconstructing
1876 caching native Python objects in memory or has a way of reconstructing
1874 the original Python objects, implementations typically call this function
1877 the original Python objects, implementations typically call this function
1875 to produce bytes from the output objects and then store those bytes in
1878 to produce bytes from the output objects and then store those bytes in
1876 the cache. When it comes time to re-emit those bytes, they are wrapped
1879 the cache. When it comes time to re-emit those bytes, they are wrapped
1877 in a ``wireprototypes.encodedresponse`` instance to tell the output
1880 in a ``wireprototypes.encodedresponse`` instance to tell the output
1878 layer that they are pre-encoded.
1881 layer that they are pre-encoded.
1879
1882
1880 When receiving the objects emitted by the command function, instances
1883 When receiving the objects emitted by the command function, instances
1881 can choose what to do with those objects. The simplest thing to do is
1884 can choose what to do with those objects. The simplest thing to do is
1882 re-emit the original objects. They will be forwarded to the output
1885 re-emit the original objects. They will be forwarded to the output
1883 layer and will be processed as if the cacher did not exist.
1886 layer and will be processed as if the cacher did not exist.
1884
1887
1885 Implementations could also choose not to emit objects, instead locally
1888 Implementations could also choose not to emit objects, instead locally
1886 buffering objects or their encoded representation. They could then emit
1889 buffering objects or their encoded representation. They could then emit
1887 a single "coalesced" object when ``onfinished()`` is called. In
1890 a single "coalesced" object when ``onfinished()`` is called. In
1888 this way, the implementation would function as a filtering layer of
1891 this way, the implementation would function as a filtering layer of
1889 sorts.
1892 sorts.
1890
1893
1891 When caching objects, typically the encoded form of the object will
1894 When caching objects, typically the encoded form of the object will
1892 be stored. Keep in mind that if the original object is forwarded to
1895 be stored. Keep in mind that if the original object is forwarded to
1893 the output layer, it will need to be encoded there as well. For large
1896 the output layer, it will need to be encoded there as well. For large
1894 output, this redundant encoding could add overhead. Implementations
1897 output, this redundant encoding could add overhead. Implementations
1895 could wrap the encoded object data in ``wireprototypes.encodedresponse``
1898 could wrap the encoded object data in ``wireprototypes.encodedresponse``
1896 instances to avoid this overhead.
1899 instances to avoid this overhead.
1897 """
1900 """
1898
1901
1899 def __enter__():
1902 def __enter__():
1900 """Marks the instance as active.
1903 """Marks the instance as active.
1901
1904
1902 Should return self.
1905 Should return self.
1903 """
1906 """
1904
1907
1905 def __exit__(exctype, excvalue, exctb):
1908 def __exit__(exctype, excvalue, exctb):
1906 """Called when cacher is no longer used.
1909 """Called when cacher is no longer used.
1907
1910
1908 This can be used by implementations to perform cleanup actions (e.g.
1911 This can be used by implementations to perform cleanup actions (e.g.
1909 disconnecting network sockets, aborting a partially cached response).
1912 disconnecting network sockets, aborting a partially cached response).
1910 """
1913 """
1911
1914
1912 def adjustcachekeystate(state):
1915 def adjustcachekeystate(state):
1913 """Influences cache key derivation by adjusting state to derive key.
1916 """Influences cache key derivation by adjusting state to derive key.
1914
1917
1915 A dict defining the state used to derive the cache key is passed.
1918 A dict defining the state used to derive the cache key is passed.
1916
1919
1917 Implementations can modify this dict to record additional state that
1920 Implementations can modify this dict to record additional state that
1918 should influence key derivation.
1921 should influence key derivation.
1919
1922
1920 Implementations are *highly* encouraged to not modify or delete
1923 Implementations are *highly* encouraged to not modify or delete
1921 existing keys.
1924 existing keys.
1922 """
1925 """
1923
1926
1924 def setcachekey(key):
1927 def setcachekey(key):
1925 """Record the derived cache key for this request.
1928 """Record the derived cache key for this request.
1926
1929
1927 Instances may mutate the key for internal usage, as desired; e.g.
1930 Instances may mutate the key for internal usage, as desired; e.g.
1928 instances may wish to prepend the repo name, introduce path
1931 instances may wish to prepend the repo name, introduce path
1929 components for filesystem or URL addressing, etc. Behavior is up to
1932 components for filesystem or URL addressing, etc. Behavior is up to
1930 the cache.
1933 the cache.
1931
1934
1932 Returns a bool indicating if the request is cacheable by this
1935 Returns a bool indicating if the request is cacheable by this
1933 instance.
1936 instance.
1934 """
1937 """
1935
1938
1936 def lookup():
1939 def lookup():
1937 """Attempt to resolve an entry in the cache.
1940 """Attempt to resolve an entry in the cache.
1938
1941
1939 The instance is instructed to look for the cache key that it was
1942 The instance is instructed to look for the cache key that it was
1940 informed about via the call to ``setcachekey()``.
1943 informed about via the call to ``setcachekey()``.
1941
1944
1942 If there's no cache hit or the cacher doesn't wish to use the cached
1945 If there's no cache hit or the cacher doesn't wish to use the cached
1943 entry, ``None`` should be returned.
1946 entry, ``None`` should be returned.
1944
1947
1945 Else, a dict defining the cached result should be returned. The
1948 Else, a dict defining the cached result should be returned. The
1946 dict may have the following keys:
1949 dict may have the following keys:
1947
1950
1948 objs
1951 objs
1949 An iterable of objects that should be sent to the client. That
1952 An iterable of objects that should be sent to the client. That
1950 iterable of objects is expected to be what the command function
1953 iterable of objects is expected to be what the command function
1951 would return if invoked or an equivalent representation thereof.
1954 would return if invoked or an equivalent representation thereof.
1952 """
1955 """
1953
1956
1954 def onobject(obj):
1957 def onobject(obj):
1955 """Called when a new object is emitted from the command function.
1958 """Called when a new object is emitted from the command function.
1956
1959
1957 Receives as its argument the object that was emitted from the
1960 Receives as its argument the object that was emitted from the
1958 command function.
1961 command function.
1959
1962
1960 This method returns an iterator of objects to forward to the output
1963 This method returns an iterator of objects to forward to the output
1961 layer. The easiest implementation is a generator that just
1964 layer. The easiest implementation is a generator that just
1962 ``yield obj``.
1965 ``yield obj``.
1963 """
1966 """
1964
1967
1965 def onfinished():
1968 def onfinished():
1966 """Called after all objects have been emitted from the command function.
1969 """Called after all objects have been emitted from the command function.
1967
1970
1968 Implementations should return an iterator of objects to forward to
1971 Implementations should return an iterator of objects to forward to
1969 the output layer.
1972 the output layer.
1970
1973
1971 This method can be a generator.
1974 This method can be a generator.
1972 """
1975 """
@@ -1,3818 +1,3819
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import os
11 import os
12 import random
12 import random
13 import sys
13 import sys
14 import time
14 import time
15 import weakref
15 import weakref
16
16
17 from .i18n import _
17 from .i18n import _
18 from .node import (
18 from .node import (
19 bin,
19 bin,
20 hex,
20 hex,
21 nullid,
21 nullid,
22 nullrev,
22 nullrev,
23 short,
23 short,
24 )
24 )
25 from .pycompat import (
25 from .pycompat import (
26 delattr,
26 delattr,
27 getattr,
27 getattr,
28 )
28 )
29 from . import (
29 from . import (
30 bookmarks,
30 bookmarks,
31 branchmap,
31 branchmap,
32 bundle2,
32 bundle2,
33 changegroup,
33 changegroup,
34 color,
34 color,
35 context,
35 context,
36 dirstate,
36 dirstate,
37 dirstateguard,
37 dirstateguard,
38 discovery,
38 discovery,
39 encoding,
39 encoding,
40 error,
40 error,
41 exchange,
41 exchange,
42 extensions,
42 extensions,
43 filelog,
43 filelog,
44 hook,
44 hook,
45 lock as lockmod,
45 lock as lockmod,
46 match as matchmod,
46 match as matchmod,
47 merge as mergemod,
47 merge as mergemod,
48 mergeutil,
48 mergeutil,
49 namespaces,
49 namespaces,
50 narrowspec,
50 narrowspec,
51 obsolete,
51 obsolete,
52 pathutil,
52 pathutil,
53 phases,
53 phases,
54 pushkey,
54 pushkey,
55 pycompat,
55 pycompat,
56 rcutil,
56 rcutil,
57 repoview,
57 repoview,
58 revset,
58 revset,
59 revsetlang,
59 revsetlang,
60 scmutil,
60 scmutil,
61 sparse,
61 sparse,
62 store as storemod,
62 store as storemod,
63 subrepoutil,
63 subrepoutil,
64 tags as tagsmod,
64 tags as tagsmod,
65 transaction,
65 transaction,
66 txnutil,
66 txnutil,
67 util,
67 util,
68 vfs as vfsmod,
68 vfs as vfsmod,
69 )
69 )
70
70
71 from .interfaces import (
71 from .interfaces import (
72 repository,
72 repository,
73 util as interfaceutil,
73 util as interfaceutil,
74 )
74 )
75
75
76 from .utils import (
76 from .utils import (
77 hashutil,
77 hashutil,
78 procutil,
78 procutil,
79 stringutil,
79 stringutil,
80 )
80 )
81
81
82 from .revlogutils import constants as revlogconst
82 from .revlogutils import constants as revlogconst
83
83
84 release = lockmod.release
84 release = lockmod.release
85 urlerr = util.urlerr
85 urlerr = util.urlerr
86 urlreq = util.urlreq
86 urlreq = util.urlreq
87
87
88 # set of (path, vfs-location) tuples. vfs-location is:
88 # set of (path, vfs-location) tuples. vfs-location is:
89 # - 'plain' for vfs relative paths
89 # - 'plain' for vfs relative paths
90 # - '' for svfs relative paths
90 # - '' for svfs relative paths
91 _cachedfiles = set()
91 _cachedfiles = set()
92
92
93
93
94 class _basefilecache(scmutil.filecache):
94 class _basefilecache(scmutil.filecache):
95 """All filecache usage on repo are done for logic that should be unfiltered
95 """All filecache usage on repo are done for logic that should be unfiltered
96 """
96 """
97
97
98 def __get__(self, repo, type=None):
98 def __get__(self, repo, type=None):
99 if repo is None:
99 if repo is None:
100 return self
100 return self
101 # proxy to unfiltered __dict__ since filtered repo has no entry
101 # proxy to unfiltered __dict__ since filtered repo has no entry
102 unfi = repo.unfiltered()
102 unfi = repo.unfiltered()
103 try:
103 try:
104 return unfi.__dict__[self.sname]
104 return unfi.__dict__[self.sname]
105 except KeyError:
105 except KeyError:
106 pass
106 pass
107 return super(_basefilecache, self).__get__(unfi, type)
107 return super(_basefilecache, self).__get__(unfi, type)
108
108
109 def set(self, repo, value):
109 def set(self, repo, value):
110 return super(_basefilecache, self).set(repo.unfiltered(), value)
110 return super(_basefilecache, self).set(repo.unfiltered(), value)
111
111
112
112
113 class repofilecache(_basefilecache):
113 class repofilecache(_basefilecache):
114 """filecache for files in .hg but outside of .hg/store"""
114 """filecache for files in .hg but outside of .hg/store"""
115
115
116 def __init__(self, *paths):
116 def __init__(self, *paths):
117 super(repofilecache, self).__init__(*paths)
117 super(repofilecache, self).__init__(*paths)
118 for path in paths:
118 for path in paths:
119 _cachedfiles.add((path, b'plain'))
119 _cachedfiles.add((path, b'plain'))
120
120
121 def join(self, obj, fname):
121 def join(self, obj, fname):
122 return obj.vfs.join(fname)
122 return obj.vfs.join(fname)
123
123
124
124
125 class storecache(_basefilecache):
125 class storecache(_basefilecache):
126 """filecache for files in the store"""
126 """filecache for files in the store"""
127
127
128 def __init__(self, *paths):
128 def __init__(self, *paths):
129 super(storecache, self).__init__(*paths)
129 super(storecache, self).__init__(*paths)
130 for path in paths:
130 for path in paths:
131 _cachedfiles.add((path, b''))
131 _cachedfiles.add((path, b''))
132
132
133 def join(self, obj, fname):
133 def join(self, obj, fname):
134 return obj.sjoin(fname)
134 return obj.sjoin(fname)
135
135
136
136
137 class mixedrepostorecache(_basefilecache):
137 class mixedrepostorecache(_basefilecache):
138 """filecache for a mix files in .hg/store and outside"""
138 """filecache for a mix files in .hg/store and outside"""
139
139
140 def __init__(self, *pathsandlocations):
140 def __init__(self, *pathsandlocations):
141 # scmutil.filecache only uses the path for passing back into our
141 # scmutil.filecache only uses the path for passing back into our
142 # join(), so we can safely pass a list of paths and locations
142 # join(), so we can safely pass a list of paths and locations
143 super(mixedrepostorecache, self).__init__(*pathsandlocations)
143 super(mixedrepostorecache, self).__init__(*pathsandlocations)
144 _cachedfiles.update(pathsandlocations)
144 _cachedfiles.update(pathsandlocations)
145
145
146 def join(self, obj, fnameandlocation):
146 def join(self, obj, fnameandlocation):
147 fname, location = fnameandlocation
147 fname, location = fnameandlocation
148 if location == b'plain':
148 if location == b'plain':
149 return obj.vfs.join(fname)
149 return obj.vfs.join(fname)
150 else:
150 else:
151 if location != b'':
151 if location != b'':
152 raise error.ProgrammingError(
152 raise error.ProgrammingError(
153 b'unexpected location: %s' % location
153 b'unexpected location: %s' % location
154 )
154 )
155 return obj.sjoin(fname)
155 return obj.sjoin(fname)
156
156
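# Editorial sketch (hypothetical class): how repository types declare cached
# properties with the decorators above; localrepository uses this exact
# pattern later in this file. The changelog body is a placeholder.
class _examplecachedecorators(object):
    @repofilecache(b'bookmarks')
    def _bookmarks(self):
        # re-read only when .hg/bookmarks changes on disk
        return bookmarks.bmstore(self)

    @storecache(b'00changelog.i')
    def changelog(self):
        # re-read only when the store's changelog index changes
        raise NotImplementedError('sketch only')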
157
157
158 def isfilecached(repo, name):
158 def isfilecached(repo, name):
159 """check if a repo has already cached "name" filecache-ed property
159 """check if a repo has already cached "name" filecache-ed property
160
160
161 This returns (cachedobj-or-None, iscached) tuple.
161 This returns (cachedobj-or-None, iscached) tuple.
162 """
162 """
163 cacheentry = repo.unfiltered()._filecache.get(name, None)
163 cacheentry = repo.unfiltered()._filecache.get(name, None)
164 if not cacheentry:
164 if not cacheentry:
165 return None, False
165 return None, False
166 return cacheentry.obj, True
166 return cacheentry.obj, True
167
167
168
168
169 class unfilteredpropertycache(util.propertycache):
169 class unfilteredpropertycache(util.propertycache):
170 """propertycache that apply to unfiltered repo only"""
170 """propertycache that apply to unfiltered repo only"""
171
171
172 def __get__(self, repo, type=None):
172 def __get__(self, repo, type=None):
173 unfi = repo.unfiltered()
173 unfi = repo.unfiltered()
174 if unfi is repo:
174 if unfi is repo:
175 return super(unfilteredpropertycache, self).__get__(unfi)
175 return super(unfilteredpropertycache, self).__get__(unfi)
176 return getattr(unfi, self.name)
176 return getattr(unfi, self.name)
177
177
178
178
179 class filteredpropertycache(util.propertycache):
179 class filteredpropertycache(util.propertycache):
180 """propertycache that must take filtering in account"""
180 """propertycache that must take filtering in account"""
181
181
182 def cachevalue(self, obj, value):
182 def cachevalue(self, obj, value):
183 object.__setattr__(obj, self.name, value)
183 object.__setattr__(obj, self.name, value)
184
184
185
185
186 def hasunfilteredcache(repo, name):
186 def hasunfilteredcache(repo, name):
187 """check if a repo has an unfilteredpropertycache value for <name>"""
187 """check if a repo has an unfilteredpropertycache value for <name>"""
188 return name in vars(repo.unfiltered())
188 return name in vars(repo.unfiltered())
189
189
190
190
191 def unfilteredmethod(orig):
191 def unfilteredmethod(orig):
192 """decorate method that always need to be run on unfiltered version"""
192 """decorate method that always need to be run on unfiltered version"""
193
193
194 def wrapper(repo, *args, **kwargs):
194 def wrapper(repo, *args, **kwargs):
195 return orig(repo.unfiltered(), *args, **kwargs)
195 return orig(repo.unfiltered(), *args, **kwargs)
196
196
197 return wrapper
197 return wrapper
198
198
199
199
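# Editorial sketch: applying the decorator to a plain function shows its
# effect; the wrapped callable always receives the unfiltered repo (the
# function name is hypothetical).
@unfilteredmethod
def _exampleunfiltered(repo):
    return repo  # always repo.unfiltered(), whichever view the caller held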
200 moderncaps = {
200 moderncaps = {
201 b'lookup',
201 b'lookup',
202 b'branchmap',
202 b'branchmap',
203 b'pushkey',
203 b'pushkey',
204 b'known',
204 b'known',
205 b'getbundle',
205 b'getbundle',
206 b'unbundle',
206 b'unbundle',
207 }
207 }
208 legacycaps = moderncaps.union({b'changegroupsubset'})
208 legacycaps = moderncaps.union({b'changegroupsubset'})
209
209
210
210
211 @interfaceutil.implementer(repository.ipeercommandexecutor)
211 @interfaceutil.implementer(repository.ipeercommandexecutor)
212 class localcommandexecutor(object):
212 class localcommandexecutor(object):
213 def __init__(self, peer):
213 def __init__(self, peer):
214 self._peer = peer
214 self._peer = peer
215 self._sent = False
215 self._sent = False
216 self._closed = False
216 self._closed = False
217
217
218 def __enter__(self):
218 def __enter__(self):
219 return self
219 return self
220
220
221 def __exit__(self, exctype, excvalue, exctb):
221 def __exit__(self, exctype, excvalue, exctb):
222 self.close()
222 self.close()
223
223
224 def callcommand(self, command, args):
224 def callcommand(self, command, args):
225 if self._sent:
225 if self._sent:
226 raise error.ProgrammingError(
226 raise error.ProgrammingError(
227 b'callcommand() cannot be used after sendcommands()'
227 b'callcommand() cannot be used after sendcommands()'
228 )
228 )
229
229
230 if self._closed:
230 if self._closed:
231 raise error.ProgrammingError(
231 raise error.ProgrammingError(
232 b'callcommand() cannot be used after close()'
232 b'callcommand() cannot be used after close()'
233 )
233 )
234
234
235 # We don't need to support anything fancy. Just call the named
235 # We don't need to support anything fancy. Just call the named
236 # method on the peer and return a resolved future.
236 # method on the peer and return a resolved future.
237 fn = getattr(self._peer, pycompat.sysstr(command))
237 fn = getattr(self._peer, pycompat.sysstr(command))
238
238
239 f = pycompat.futures.Future()
239 f = pycompat.futures.Future()
240
240
241 try:
241 try:
242 result = fn(**pycompat.strkwargs(args))
242 result = fn(**pycompat.strkwargs(args))
243 except Exception:
243 except Exception:
244 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
244 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
245 else:
245 else:
246 f.set_result(result)
246 f.set_result(result)
247
247
248 return f
248 return f
249
249
250 def sendcommands(self):
250 def sendcommands(self):
251 self._sent = True
251 self._sent = True
252
252
253 def close(self):
253 def close(self):
254 self._closed = True
254 self._closed = True
255
255
256
256
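# Editorial sketch: how callers drive an executor such as the one above.
# The ``peer`` argument is assumed to implement the peer interface, and
# b'heads' is a standard wire protocol command.
def _exampleheads(peer):
    with peer.commandexecutor() as executor:
        f = executor.callcommand(b'heads', {})
        # for localcommandexecutor the future is already resolved; remote
        # implementations may not resolve it until commands are sent
        return f.result()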
257 @interfaceutil.implementer(repository.ipeercommands)
257 @interfaceutil.implementer(repository.ipeercommands)
258 class localpeer(repository.peer):
258 class localpeer(repository.peer):
259 '''peer for a local repo; reflects only the most recent API'''
259 '''peer for a local repo; reflects only the most recent API'''
260
260
261 def __init__(self, repo, caps=None):
261 def __init__(self, repo, caps=None):
262 super(localpeer, self).__init__()
262 super(localpeer, self).__init__()
263
263
264 if caps is None:
264 if caps is None:
265 caps = moderncaps.copy()
265 caps = moderncaps.copy()
266 self._repo = repo.filtered(b'served')
266 self._repo = repo.filtered(b'served')
267 self.ui = repo.ui
267 self.ui = repo.ui
268 self._caps = repo._restrictcapabilities(caps)
268 self._caps = repo._restrictcapabilities(caps)
269
269
270 # Begin of _basepeer interface.
270 # Begin of _basepeer interface.
271
271
272 def url(self):
272 def url(self):
273 return self._repo.url()
273 return self._repo.url()
274
274
275 def local(self):
275 def local(self):
276 return self._repo
276 return self._repo
277
277
278 def peer(self):
278 def peer(self):
279 return self
279 return self
280
280
281 def canpush(self):
281 def canpush(self):
282 return True
282 return True
283
283
284 def close(self):
284 def close(self):
285 self._repo.close()
285 self._repo.close()
286
286
287 # End of _basepeer interface.
287 # End of _basepeer interface.
288
288
289 # Begin of _basewirecommands interface.
289 # Begin of _basewirecommands interface.
290
290
291 def branchmap(self):
291 def branchmap(self):
292 return self._repo.branchmap()
292 return self._repo.branchmap()
293
293
294 def capabilities(self):
294 def capabilities(self):
295 return self._caps
295 return self._caps
296
296
297 def clonebundles(self):
297 def clonebundles(self):
298 return self._repo.tryread(b'clonebundles.manifest')
298 return self._repo.tryread(b'clonebundles.manifest')
299
299
300 def debugwireargs(self, one, two, three=None, four=None, five=None):
300 def debugwireargs(self, one, two, three=None, four=None, five=None):
301 """Used to test argument passing over the wire"""
301 """Used to test argument passing over the wire"""
302 return b"%s %s %s %s %s" % (
302 return b"%s %s %s %s %s" % (
303 one,
303 one,
304 two,
304 two,
305 pycompat.bytestr(three),
305 pycompat.bytestr(three),
306 pycompat.bytestr(four),
306 pycompat.bytestr(four),
307 pycompat.bytestr(five),
307 pycompat.bytestr(five),
308 )
308 )
309
309
310 def getbundle(
310 def getbundle(
311 self, source, heads=None, common=None, bundlecaps=None, **kwargs
311 self, source, heads=None, common=None, bundlecaps=None, **kwargs
312 ):
312 ):
313 chunks = exchange.getbundlechunks(
313 chunks = exchange.getbundlechunks(
314 self._repo,
314 self._repo,
315 source,
315 source,
316 heads=heads,
316 heads=heads,
317 common=common,
317 common=common,
318 bundlecaps=bundlecaps,
318 bundlecaps=bundlecaps,
319 **kwargs
319 **kwargs
320 )[1]
320 )[1]
321 cb = util.chunkbuffer(chunks)
321 cb = util.chunkbuffer(chunks)
322
322
323 if exchange.bundle2requested(bundlecaps):
323 if exchange.bundle2requested(bundlecaps):
324 # When requesting a bundle2, getbundle returns a stream to make the
324 # When requesting a bundle2, getbundle returns a stream to make the
325 # wire level function happier. We need to build a proper object
325 # wire level function happier. We need to build a proper object
326 # from it in local peer.
326 # from it in local peer.
327 return bundle2.getunbundler(self.ui, cb)
327 return bundle2.getunbundler(self.ui, cb)
328 else:
328 else:
329 return changegroup.getunbundler(b'01', cb, None)
329 return changegroup.getunbundler(b'01', cb, None)
330
330
331 def heads(self):
331 def heads(self):
332 return self._repo.heads()
332 return self._repo.heads()
333
333
334 def known(self, nodes):
334 def known(self, nodes):
335 return self._repo.known(nodes)
335 return self._repo.known(nodes)
336
336
337 def listkeys(self, namespace):
337 def listkeys(self, namespace):
338 return self._repo.listkeys(namespace)
338 return self._repo.listkeys(namespace)
339
339
340 def lookup(self, key):
340 def lookup(self, key):
341 return self._repo.lookup(key)
341 return self._repo.lookup(key)
342
342
343 def pushkey(self, namespace, key, old, new):
343 def pushkey(self, namespace, key, old, new):
344 return self._repo.pushkey(namespace, key, old, new)
344 return self._repo.pushkey(namespace, key, old, new)
345
345
346 def stream_out(self):
346 def stream_out(self):
347 raise error.Abort(_(b'cannot perform stream clone against local peer'))
347 raise error.Abort(_(b'cannot perform stream clone against local peer'))
348
348
349 def unbundle(self, bundle, heads, url):
349 def unbundle(self, bundle, heads, url):
350 """apply a bundle on a repo
350 """apply a bundle on a repo
351
351
352 This function handles the repo locking itself."""
352 This function handles the repo locking itself."""
353 try:
353 try:
354 try:
354 try:
355 bundle = exchange.readbundle(self.ui, bundle, None)
355 bundle = exchange.readbundle(self.ui, bundle, None)
356 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
356 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
357 if util.safehasattr(ret, b'getchunks'):
357 if util.safehasattr(ret, b'getchunks'):
358 # This is a bundle20 object, turn it into an unbundler.
358 # This is a bundle20 object, turn it into an unbundler.
359 # This little dance should be dropped eventually when the
359 # This little dance should be dropped eventually when the
360 # API is finally improved.
360 # API is finally improved.
361 stream = util.chunkbuffer(ret.getchunks())
361 stream = util.chunkbuffer(ret.getchunks())
362 ret = bundle2.getunbundler(self.ui, stream)
362 ret = bundle2.getunbundler(self.ui, stream)
363 return ret
363 return ret
364 except Exception as exc:
364 except Exception as exc:
365 # If the exception contains output salvaged from a bundle2
365 # If the exception contains output salvaged from a bundle2
366 # reply, we need to make sure it is printed before continuing
366 # reply, we need to make sure it is printed before continuing
367 # to fail. So we build a bundle2 with such output and consume
367 # to fail. So we build a bundle2 with such output and consume
368 # it directly.
368 # it directly.
369 #
369 #
370 # This is not very elegant but allows a "simple" solution for
370 # This is not very elegant but allows a "simple" solution for
371 # issue4594
371 # issue4594
372 output = getattr(exc, '_bundle2salvagedoutput', ())
372 output = getattr(exc, '_bundle2salvagedoutput', ())
373 if output:
373 if output:
374 bundler = bundle2.bundle20(self._repo.ui)
374 bundler = bundle2.bundle20(self._repo.ui)
375 for out in output:
375 for out in output:
376 bundler.addpart(out)
376 bundler.addpart(out)
377 stream = util.chunkbuffer(bundler.getchunks())
377 stream = util.chunkbuffer(bundler.getchunks())
378 b = bundle2.getunbundler(self.ui, stream)
378 b = bundle2.getunbundler(self.ui, stream)
379 bundle2.processbundle(self._repo, b)
379 bundle2.processbundle(self._repo, b)
380 raise
380 raise
381 except error.PushRaced as exc:
381 except error.PushRaced as exc:
382 raise error.ResponseError(
382 raise error.ResponseError(
383 _(b'push failed:'), stringutil.forcebytestr(exc)
383 _(b'push failed:'), stringutil.forcebytestr(exc)
384 )
384 )
385
385
386 # End of _basewirecommands interface.
386 # End of _basewirecommands interface.
387
387
388 # Begin of peer interface.
388 # Begin of peer interface.
389
389
390 def commandexecutor(self):
390 def commandexecutor(self):
391 return localcommandexecutor(self)
391 return localcommandexecutor(self)
392
392
393 # End of peer interface.
393 # End of peer interface.
394
394
395
395
396 @interfaceutil.implementer(repository.ipeerlegacycommands)
396 @interfaceutil.implementer(repository.ipeerlegacycommands)
397 class locallegacypeer(localpeer):
397 class locallegacypeer(localpeer):
398 '''peer extension which implements legacy methods too; used for tests with
398 '''peer extension which implements legacy methods too; used for tests with
399 restricted capabilities'''
399 restricted capabilities'''
400
400
401 def __init__(self, repo):
401 def __init__(self, repo):
402 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
402 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
403
403
404 # Begin of baselegacywirecommands interface.
404 # Begin of baselegacywirecommands interface.
405
405
406 def between(self, pairs):
406 def between(self, pairs):
407 return self._repo.between(pairs)
407 return self._repo.between(pairs)
408
408
409 def branches(self, nodes):
409 def branches(self, nodes):
410 return self._repo.branches(nodes)
410 return self._repo.branches(nodes)
411
411
412 def changegroup(self, nodes, source):
412 def changegroup(self, nodes, source):
413 outgoing = discovery.outgoing(
413 outgoing = discovery.outgoing(
414 self._repo, missingroots=nodes, missingheads=self._repo.heads()
414 self._repo, missingroots=nodes, missingheads=self._repo.heads()
415 )
415 )
416 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
416 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
417
417
418 def changegroupsubset(self, bases, heads, source):
418 def changegroupsubset(self, bases, heads, source):
419 outgoing = discovery.outgoing(
419 outgoing = discovery.outgoing(
420 self._repo, missingroots=bases, missingheads=heads
420 self._repo, missingroots=bases, missingheads=heads
421 )
421 )
422 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
422 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
423
423
424 # End of baselegacywirecommands interface.
424 # End of baselegacywirecommands interface.
425
425
426
426
427 # Increment the sub-version when the revlog v2 format changes to lock out old
427 # Increment the sub-version when the revlog v2 format changes to lock out old
428 # clients.
428 # clients.
429 REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'
429 REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'
430
430
431 # A repository with the sparserevlog feature will have delta chains that
431 # A repository with the sparserevlog feature will have delta chains that
432 # can spread over a larger span. Sparse reading cuts these large spans into
432 # can spread over a larger span. Sparse reading cuts these large spans into
433 # pieces, so that each piece isn't too big.
433 # pieces, so that each piece isn't too big.
434 # Without the sparserevlog capability, reading from the repository could use
434 # Without the sparserevlog capability, reading from the repository could use
435 # huge amounts of memory, because the whole span would be read at once,
435 # huge amounts of memory, because the whole span would be read at once,
436 # including all the intermediate revisions that aren't pertinent for the chain.
436 # including all the intermediate revisions that aren't pertinent for the chain.
437 # This is why once a repository has enabled sparse-read, it becomes required.
437 # This is why once a repository has enabled sparse-read, it becomes required.
438 SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
438 SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
439
439
440 # A repository with the sidedataflag requirement will allow storing extra
440 # A repository with the sidedataflag requirement will allow storing extra
441 # information for revisions without altering their original hashes.
441 # information for revisions without altering their original hashes.
442 SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'
442 SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'
443
443
444 # A repository with the copies-sidedata-changeset requirement will store
444 # A repository with the copies-sidedata-changeset requirement will store
445 # copies related information in changeset's sidedata.
445 # copies related information in changeset's sidedata.
446 COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
446 COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
447
447
448 # Functions receiving (ui, features) that extensions can register to impact
448 # Functions receiving (ui, features) that extensions can register to impact
449 # the ability to load repositories with custom requirements. Only
449 # the ability to load repositories with custom requirements. Only
450 # functions defined in loaded extensions are called.
450 # functions defined in loaded extensions are called.
451 #
451 #
452 # The function receives a set of requirement strings that the repository
452 # The function receives a set of requirement strings that the repository
453 # is capable of opening. Functions will typically add elements to the
453 # is capable of opening. Functions will typically add elements to the
454 # set to reflect that the extension knows how to handle those requirements.
454 # set to reflect that the extension knows how to handle those requirements.
455 featuresetupfuncs = set()
455 featuresetupfuncs = set()
456
456
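# Editorial sketch: the shape of a function an extension would register in
# ``featuresetupfuncs`` (from its uisetup()). The requirement name below is
# hypothetical.
def _examplefeaturesetup(ui, features):
    features.add(b'exp-examplefeature')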
457
457
458 def makelocalrepository(baseui, path, intents=None):
458 def makelocalrepository(baseui, path, intents=None):
459 """Create a local repository object.
459 """Create a local repository object.
460
460
461 Given arguments needed to construct a local repository, this function
461 Given arguments needed to construct a local repository, this function
462 performs various early repository loading functionality (such as
462 performs various early repository loading functionality (such as
463 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
463 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
464 the repository can be opened, derives a type suitable for representing
464 the repository can be opened, derives a type suitable for representing
465 that repository, and returns an instance of it.
465 that repository, and returns an instance of it.
466
466
467 The returned object conforms to the ``repository.completelocalrepository``
467 The returned object conforms to the ``repository.completelocalrepository``
468 interface.
468 interface.
469
469
470 The repository type is derived by calling a series of factory functions
470 The repository type is derived by calling a series of factory functions
471 for each aspect/interface of the final repository. These are defined by
471 for each aspect/interface of the final repository. These are defined by
472 ``REPO_INTERFACES``.
472 ``REPO_INTERFACES``.
473
473
474 Each factory function is called to produce a type implementing a specific
474 Each factory function is called to produce a type implementing a specific
475 interface. The cumulative list of returned types will be combined into a
475 interface. The cumulative list of returned types will be combined into a
476 new type and that type will be instantiated to represent the local
476 new type and that type will be instantiated to represent the local
477 repository.
477 repository.
478
478
479 The factory functions each receive various state that may be consulted
479 The factory functions each receive various state that may be consulted
480 as part of deriving a type.
480 as part of deriving a type.
481
481
482 Extensions should wrap these factory functions to customize repository type
482 Extensions should wrap these factory functions to customize repository type
483 creation. Note that an extension's wrapped function may be called even if
483 creation. Note that an extension's wrapped function may be called even if
484 that extension is not loaded for the repo being constructed. Extensions
484 that extension is not loaded for the repo being constructed. Extensions
485 should check if their ``__name__`` appears in the
485 should check if their ``__name__`` appears in the
486 ``extensionmodulenames`` set passed to the factory function and no-op if
486 ``extensionmodulenames`` set passed to the factory function and no-op if
487 not.
487 not.
488 """
488 """
489 ui = baseui.copy()
489 ui = baseui.copy()
490 # Prevent copying repo configuration.
490 # Prevent copying repo configuration.
491 ui.copy = baseui.copy
491 ui.copy = baseui.copy
492
492
493 # Working directory VFS rooted at repository root.
493 # Working directory VFS rooted at repository root.
494 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
494 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
495
495
496 # Main VFS for .hg/ directory.
496 # Main VFS for .hg/ directory.
497 hgpath = wdirvfs.join(b'.hg')
497 hgpath = wdirvfs.join(b'.hg')
498 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
498 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
499
499
500 # The .hg/ path should exist and should be a directory. All other
500 # The .hg/ path should exist and should be a directory. All other
501 # cases are errors.
501 # cases are errors.
502 if not hgvfs.isdir():
502 if not hgvfs.isdir():
503 try:
503 try:
504 hgvfs.stat()
504 hgvfs.stat()
505 except OSError as e:
505 except OSError as e:
506 if e.errno != errno.ENOENT:
506 if e.errno != errno.ENOENT:
507 raise
507 raise
508
508
509 raise error.RepoError(_(b'repository %s not found') % path)
509 raise error.RepoError(_(b'repository %s not found') % path)
510
510
511 # .hg/requires file contains a newline-delimited list of
511 # .hg/requires file contains a newline-delimited list of
512 # features/capabilities the opener (us) must have in order to use
512 # features/capabilities the opener (us) must have in order to use
513 # the repository. This file was introduced in Mercurial 0.9.2,
513 # the repository. This file was introduced in Mercurial 0.9.2,
514 # which means very old repositories may not have one. We assume
514 # which means very old repositories may not have one. We assume
515 # a missing file translates to no requirements.
515 # a missing file translates to no requirements.
516 try:
516 try:
517 requirements = set(hgvfs.read(b'requires').splitlines())
517 requirements = set(hgvfs.read(b'requires').splitlines())
518 except IOError as e:
518 except IOError as e:
519 if e.errno != errno.ENOENT:
519 if e.errno != errno.ENOENT:
520 raise
520 raise
521 requirements = set()
521 requirements = set()
522
522
523 # The .hg/hgrc file may load extensions or contain config options
523 # The .hg/hgrc file may load extensions or contain config options
524 # that influence repository construction. Attempt to load it and
524 # that influence repository construction. Attempt to load it and
525 # process any new extensions that it may have pulled in.
525 # process any new extensions that it may have pulled in.
526 if loadhgrc(ui, wdirvfs, hgvfs, requirements):
526 if loadhgrc(ui, wdirvfs, hgvfs, requirements):
527 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
527 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
528 extensions.loadall(ui)
528 extensions.loadall(ui)
529 extensions.populateui(ui)
529 extensions.populateui(ui)
530
530
531 # Set of module names of extensions loaded for this repository.
531 # Set of module names of extensions loaded for this repository.
532 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
532 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
533
533
534 supportedrequirements = gathersupportedrequirements(ui)
534 supportedrequirements = gathersupportedrequirements(ui)
535
535
536 # We first validate the requirements are known.
536 # We first validate the requirements are known.
537 ensurerequirementsrecognized(requirements, supportedrequirements)
537 ensurerequirementsrecognized(requirements, supportedrequirements)
538
538
539 # Then we validate that the known set is reasonable to use together.
539 # Then we validate that the known set is reasonable to use together.
540 ensurerequirementscompatible(ui, requirements)
540 ensurerequirementscompatible(ui, requirements)
541
541
542 # TODO there are unhandled edge cases related to opening repositories with
542 # TODO there are unhandled edge cases related to opening repositories with
543 # shared storage. If storage is shared, we should also test for requirements
543 # shared storage. If storage is shared, we should also test for requirements
544 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
544 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
545 # that repo, as that repo may load extensions needed to open it. This is a
545 # that repo, as that repo may load extensions needed to open it. This is a
546 # bit complicated because we don't want the other hgrc to overwrite settings
546 # bit complicated because we don't want the other hgrc to overwrite settings
547 # in this hgrc.
547 # in this hgrc.
548 #
548 #
549 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
549 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
550 # file when sharing repos. But if a requirement is added after the share is
550 # file when sharing repos. But if a requirement is added after the share is
551 # performed, thereby introducing a new requirement for the opener, we may
551 # performed, thereby introducing a new requirement for the opener, we may
552 # not see that and could encounter a run-time error interacting with
552 # not see that and could encounter a run-time error interacting with
553 # that shared store since it has an unknown-to-us requirement.
553 # that shared store since it has an unknown-to-us requirement.
554
554
555 # At this point, we know we should be capable of opening the repository.
555 # At this point, we know we should be capable of opening the repository.
556 # Now get on with doing that.
556 # Now get on with doing that.
557
557
558 features = set()
558 features = set()
559
559
560 # The "store" part of the repository holds versioned data. How it is
560 # The "store" part of the repository holds versioned data. How it is
561 # accessed is determined by various requirements. The ``shared`` or
561 # accessed is determined by various requirements. The ``shared`` or
562 # ``relshared`` requirements indicate the store lives in the path contained
562 # ``relshared`` requirements indicate the store lives in the path contained
563 # in the ``.hg/sharedpath`` file. This is an absolute path for
563 # in the ``.hg/sharedpath`` file. This is an absolute path for
564 # ``shared`` and relative to ``.hg/`` for ``relshared``.
564 # ``shared`` and relative to ``.hg/`` for ``relshared``.
565 if b'shared' in requirements or b'relshared' in requirements:
565 if b'shared' in requirements or b'relshared' in requirements:
566 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
566 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
567 if b'relshared' in requirements:
567 if b'relshared' in requirements:
568 sharedpath = hgvfs.join(sharedpath)
568 sharedpath = hgvfs.join(sharedpath)
569
569
570 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
570 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
571
571
572 if not sharedvfs.exists():
572 if not sharedvfs.exists():
573 raise error.RepoError(
573 raise error.RepoError(
574 _(b'.hg/sharedpath points to nonexistent directory %s')
574 _(b'.hg/sharedpath points to nonexistent directory %s')
575 % sharedvfs.base
575 % sharedvfs.base
576 )
576 )
577
577
578 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
578 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
579
579
580 storebasepath = sharedvfs.base
580 storebasepath = sharedvfs.base
581 cachepath = sharedvfs.join(b'cache')
581 cachepath = sharedvfs.join(b'cache')
582 else:
582 else:
583 storebasepath = hgvfs.base
583 storebasepath = hgvfs.base
584 cachepath = hgvfs.join(b'cache')
584 cachepath = hgvfs.join(b'cache')
585 wcachepath = hgvfs.join(b'wcache')
585 wcachepath = hgvfs.join(b'wcache')
586
586
587 # The store has changed over time and the exact layout is dictated by
587 # The store has changed over time and the exact layout is dictated by
588 # requirements. The store interface abstracts differences across all
588 # requirements. The store interface abstracts differences across all
589 # of them.
589 # of them.
590 store = makestore(
590 store = makestore(
591 requirements,
591 requirements,
592 storebasepath,
592 storebasepath,
593 lambda base: vfsmod.vfs(base, cacheaudited=True),
593 lambda base: vfsmod.vfs(base, cacheaudited=True),
594 )
594 )
595 hgvfs.createmode = store.createmode
595 hgvfs.createmode = store.createmode
596
596
597 storevfs = store.vfs
597 storevfs = store.vfs
598 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
598 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
599
599
600 # The cache vfs is used to manage cache files.
600 # The cache vfs is used to manage cache files.
601 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
601 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
602 cachevfs.createmode = store.createmode
602 cachevfs.createmode = store.createmode
603 # The cache vfs is used to manage cache files related to the working copy
603 # The cache vfs is used to manage cache files related to the working copy
604 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
604 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
605 wcachevfs.createmode = store.createmode
605 wcachevfs.createmode = store.createmode
606
606
607 # Now resolve the type for the repository object. We do this by repeatedly
607 # Now resolve the type for the repository object. We do this by repeatedly
608 # calling a factory function to produce types for specific aspects of the
608 # calling a factory function to produce types for specific aspects of the
609 # repo's operation. The aggregate returned types are used as base classes
609 # repo's operation. The aggregate returned types are used as base classes
610 # for a dynamically-derived type, which will represent our new repository.
610 # for a dynamically-derived type, which will represent our new repository.
611
611
612 bases = []
612 bases = []
613 extrastate = {}
613 extrastate = {}
614
614
615 for iface, fn in REPO_INTERFACES:
615 for iface, fn in REPO_INTERFACES:
616 # We pass all potentially useful state to give extensions tons of
616 # We pass all potentially useful state to give extensions tons of
617 # flexibility.
617 # flexibility.
618 typ = fn()(
618 typ = fn()(
619 ui=ui,
619 ui=ui,
620 intents=intents,
620 intents=intents,
621 requirements=requirements,
621 requirements=requirements,
622 features=features,
622 features=features,
623 wdirvfs=wdirvfs,
623 wdirvfs=wdirvfs,
624 hgvfs=hgvfs,
624 hgvfs=hgvfs,
625 store=store,
625 store=store,
626 storevfs=storevfs,
626 storevfs=storevfs,
627 storeoptions=storevfs.options,
627 storeoptions=storevfs.options,
628 cachevfs=cachevfs,
628 cachevfs=cachevfs,
629 wcachevfs=wcachevfs,
629 wcachevfs=wcachevfs,
630 extensionmodulenames=extensionmodulenames,
630 extensionmodulenames=extensionmodulenames,
631 extrastate=extrastate,
631 extrastate=extrastate,
632 baseclasses=bases,
632 baseclasses=bases,
633 )
633 )
634
634
635 if not isinstance(typ, type):
635 if not isinstance(typ, type):
636 raise error.ProgrammingError(
636 raise error.ProgrammingError(
637 b'unable to construct type for %s' % iface
637 b'unable to construct type for %s' % iface
638 )
638 )
639
639
640 bases.append(typ)
640 bases.append(typ)
641
641
642 # type() allows you to use characters in type names that wouldn't be
642 # type() allows you to use characters in type names that wouldn't be
643 # recognized as Python symbols in source code. We abuse that to add
643 # recognized as Python symbols in source code. We abuse that to add
644 # rich information about our constructed repo.
644 # rich information about our constructed repo.
645 name = pycompat.sysstr(
645 name = pycompat.sysstr(
646 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
646 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
647 )
647 )
648
648
649 cls = type(name, tuple(bases), {})
649 cls = type(name, tuple(bases), {})
650
650
651 return cls(
651 return cls(
652 baseui=baseui,
652 baseui=baseui,
653 ui=ui,
653 ui=ui,
654 origroot=path,
654 origroot=path,
655 wdirvfs=wdirvfs,
655 wdirvfs=wdirvfs,
656 hgvfs=hgvfs,
656 hgvfs=hgvfs,
657 requirements=requirements,
657 requirements=requirements,
658 supportedrequirements=supportedrequirements,
658 supportedrequirements=supportedrequirements,
659 sharedpath=storebasepath,
659 sharedpath=storebasepath,
660 store=store,
660 store=store,
661 cachevfs=cachevfs,
661 cachevfs=cachevfs,
662 wcachevfs=wcachevfs,
662 wcachevfs=wcachevfs,
663 features=features,
663 features=features,
664 intents=intents,
664 intents=intents,
665 )
665 )
666
666
667
667
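# Editorial sketch: opening a repository through this function directly;
# ``hg.repository()`` is the usual entry point, and the path below is a
# placeholder.
def _exampleopen(baseui):
    repo = makelocalrepository(baseui, b'/path/to/repo')
    return repo.requirements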
668 def loadhgrc(ui, wdirvfs, hgvfs, requirements):
668 def loadhgrc(ui, wdirvfs, hgvfs, requirements):
669 """Load hgrc files/content into a ui instance.
669 """Load hgrc files/content into a ui instance.
670
670
671 This is called during repository opening to load any additional
671 This is called during repository opening to load any additional
672 config files or settings relevant to the current repository.
672 config files or settings relevant to the current repository.
673
673
674 Returns a bool indicating whether any additional configs were loaded.
674 Returns a bool indicating whether any additional configs were loaded.
675
675
676 Extensions should monkeypatch this function to modify how per-repo
676 Extensions should monkeypatch this function to modify how per-repo
677 configs are loaded. For example, an extension may wish to pull in
677 configs are loaded. For example, an extension may wish to pull in
678 configs from alternate files or sources.
678 configs from alternate files or sources.
679 """
679 """
680 if not rcutil.use_repo_hgrc():
680 if not rcutil.use_repo_hgrc():
681 return False
681 return False
682 try:
682 try:
683 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
683 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
684 return True
684 return True
685 except IOError:
685 except IOError:
686 return False
686 return False
687
687
688
688
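# Editorial sketch: an extension wrapper for loadhgrc that pulls in one
# extra, hypothetical config file. An extension would register it with
# ``extensions.wrapfunction(localrepo, 'loadhgrc', _exampleloadhgrc)``.
def _exampleloadhgrc(orig, ui, wdirvfs, hgvfs, requirements):
    loaded = orig(ui, wdirvfs, hgvfs, requirements)
    try:
        ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
        return True
    except IOError:
        return loaded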
689 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
689 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
690 """Perform additional actions after .hg/hgrc is loaded.
690 """Perform additional actions after .hg/hgrc is loaded.
691
691
692 This function is called during repository loading immediately after
692 This function is called during repository loading immediately after
693 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
693 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
694
694
695 The function can be used to validate configs, automatically add
695 The function can be used to validate configs, automatically add
696 options (including extensions) based on requirements, etc.
696 options (including extensions) based on requirements, etc.
697 """
697 """
698
698
699 # Map of requirements to list of extensions to load automatically when
699 # Map of requirements to list of extensions to load automatically when
700 # requirement is present.
700 # requirement is present.
701 autoextensions = {
701 autoextensions = {
702 b'git': [b'git'],
702 b'git': [b'git'],
703 b'largefiles': [b'largefiles'],
703 b'largefiles': [b'largefiles'],
704 b'lfs': [b'lfs'],
704 b'lfs': [b'lfs'],
705 }
705 }
706
706
707 for requirement, names in sorted(autoextensions.items()):
707 for requirement, names in sorted(autoextensions.items()):
708 if requirement not in requirements:
708 if requirement not in requirements:
709 continue
709 continue
710
710
711 for name in names:
711 for name in names:
712 if not ui.hasconfig(b'extensions', name):
712 if not ui.hasconfig(b'extensions', name):
713 ui.setconfig(b'extensions', name, b'', source=b'autoload')
713 ui.setconfig(b'extensions', name, b'', source=b'autoload')
714
714
715
715
def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(b'revlog-compression-zstd')

    return supported


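# A sketch of the ``featuresetupfuncs`` extension point used above: an
# extension registers a callback at uisetup time, and the requirement it adds
# becomes recognized whenever that extension is enabled (the requirement name
# below is hypothetical):
#
#     def featuresetup(ui, supported):
#         supported |= {b'exp-myfeature'}
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)
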
def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.

    Returns nothing on success.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(missing)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )


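# For illustration, opening a repository whose ``.hg/requires`` lists a
# feature this code does not know raises the error above; a hypothetical
# requirement name, with the message abbreviated:
#
#     >>> ensurerequirementsrecognized(
#     ...     {b'revlogv1', b'exp-fancy-format'}, {b'revlogv1', b'store'})
#     RequirementError: repository requires features unknown to this
#     Mercurial: exp-fancy-format
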
def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or may require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if b'exp-sparse' in requirements and not sparse.enabled:
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extension to access'
            )
        )


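# Since extensions are expected to monkeypatch this function, a sketch of a
# hypothetical wrapper enforcing an extra constraint (the requirement name
# and the ``myext.enabled`` helper are illustrative, not real APIs):
#
#     def _compatible(orig, ui, requirements):
#         orig(ui, requirements)
#         if b'exp-myfeature' in requirements and not myext.enabled(ui):
#             raise error.RepoError(_(b'repository requires myext'))
#
#     extensions.wrapfunction(
#         localrepo, 'ensurerequirementscompatible', _compatible)
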
def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if b'store' in requirements:
        if b'fncache' in requirements:
            return storemod.fncachestore(
                path, vfstype, b'dotencode' in requirements
            )

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)


def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if b'treemanifest' in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:  # explicitly mark repo as using revlogv0
        options[b'revlogv0'] = True

    if COPIESSDC_REQUIREMENT in requirements:
        options[b'copies-storage'] = b'changeset-sidedata'
    else:
        writecopiesto = ui.config(b'experimental', b'copies.write-to')
        copiesextramode = (b'changeset-only', b'compatibility')
        if writecopiesto in copiesextramode:
            options[b'copies-storage'] = b'extra'

    return options


def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    options[b'deltabothparents'] = deltabothparents

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    sidedata = SIDEDATA_REQUIREMENT in requirements
    options[b'side-data'] = sidedata

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # we allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one".
        prefix = r.startswith
        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
            options[b'compengine'] = r.split(b'-', 2)[2]

    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

    if repository.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    if ui.configbool(b'experimental', b'rust.index'):
        options[b'rust.index'] = True
    if ui.configbool(b'experimental', b'exp-persistent-nodemap'):
        options[b'exp-persistent-nodemap'] = True
    if ui.configbool(b'experimental', b'exp-persistent-nodemap.mmap'):
        options[b'exp-persistent-nodemap.mmap'] = True
    if ui.configbool(b'devel', b'persistent-nodemap'):
        options[b'devel-force-nodemap'] = True

    return options


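# The requirement parsing above keeps everything after the second dash as
# the engine name, so for example:
#
#     >>> b'revlog-compression-zstd'.split(b'-', 2)[2]
#     b'zstd'
#     >>> b'exp-compression-none'.split(b'-', 2)[2]
#     b'none'
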
def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        # strip a leading slash; on Python 3 indexing bytes yields an int,
        # so startswith() is used instead of comparing path[0] to b'/'
        if path.startswith(b'/'):
            path = path[1:]

        return filelog.filelog(self.svfs, path)


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        # strip a leading slash (see revlogfilestorage.file above)
        if path.startswith(b'/'):
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if repository.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage


# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]


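# Because ``REPO_INTERFACES`` resolves the factories through lambdas, an
# extension can swap a module-level factory after import; a sketch returning
# a hypothetical subclass:
#
#     def _makemain(orig, **kwargs):
#         class extendedrepo(orig(**kwargs)):
#             pass  # add or override methods here
#         return extendedrepo
#
#     extensions.wrapfunction(localrepo, 'makemain', _makemain)
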
@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        b'revlogv1',
        b'generaldelta',
        b'treemanifest',
        COPIESSDC_REQUIREMENT,
        REVLOGV2_REQUIREMENT,
        SIDEDATA_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
        bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        b'store',
        b'fncache',
        b'shared',
        b'relshared',
        b'dotencode',
        b'exp-sparse',
        b'internal-phase',
    }

    # list of prefixes for files which can be written without 'wlock'.
    # Extensions should extend this list when needed.
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed, so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX shouldn't dirstate be covered by the wlock?
        b'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }

    def __init__(
        self,
        baseui,
        ui,
        origroot,
        wdirvfs,
        hgvfs,
        requirements,
        supportedrequirements,
        sharedpath,
        store,
        cachevfs,
        wcachevfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening
           requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that
           we know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` defining the path to the storage base directory. Points
           to a ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not util.safehasattr(repo, b'_wlockref')
                or not util.safehasattr(repo, b'_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            if path.startswith(b'journal.') or path.startswith(b'undo.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, b'_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        return caps

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self)  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

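    # For example (illustrative), obtain the view a server would expose,
    # regardless of the view ``repo`` currently uses; filtered views never
    # contain more revisions than the unfiltered repo:
    #
    #     served = repo.filtered(b'served')
    #     assert len(served) <= len(repo.unfiltered())
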
    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) outside transaction update changelog to content B
        # 3) outside transaction update bookmark file referring to content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are
        # dropped.
        #
        # Having this happen during a read is not great, but it becomes worse
        # when it happens during a write, because the bookmarks to the
        # "unknown" nodes will be dropped for good. However, writes happen
        # within locks. This locking makes it possible to have a race-free
        # consistent read. For this purpose, data read from disk before
        # locking is "invalidated" right after the locks are taken. These
        # invalidations are "light": the `filecache` mechanism keeps the data
        # in memory and will reuse it if the underlying files did not change.
        # Not parsing the same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by
        # the bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would
        # be refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not
        # enough, because at the time we build the content for `_bookmarks`
        # in (4), the changelog file has already diverged from the content
        # used for loading `changelog` in (1)
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still
        # happen without the lock (with a narrower window), but it would no
        # longer go undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) We force `changelog` filecache to be tested
        # 4) cachestat for `changelog` is captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as
        # the cache stat computed in (1). As a result, at locking time:
        #  * if the changelog did not change since (1) -> we can reuse the
        #    data
        #  * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)

    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if 'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. what we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache(b'00changelog.i')
    def changelog(self):
        return self.store.changelog(txnutil.mayhavepending(self.root))

    @storecache(b'00manifest.i')
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @repofilecache(b'dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(
            self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
        )

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned about later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

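    # A sketch of typical narrowmatch usage (illustrative, assuming ``m`` is
    # a matcher built for a command): intersect it with the narrowspec while
    # keeping exact user-given paths so they can be warned about later:
    #
    #     m = repo.narrowmatch(m, includeexact=True)
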
    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, nullid),
            nullrev: (nullrev, nullid),
            nullid: (nullrev, nullid),
        }

1538 @unfilteredpropertycache
1538 @unfilteredpropertycache
1539 def _quick_access_changeid_wc(self):
1539 def _quick_access_changeid_wc(self):
1540 # also fast path access to the working copy parents
1540 # also fast path access to the working copy parents
1541 # however, only do it for filter that ensure wc is visible.
1541 # however, only do it for filter that ensure wc is visible.
1542 quick = {}
1542 quick = {}
1543 cl = self.unfiltered().changelog
1543 cl = self.unfiltered().changelog
1544 for node in self.dirstate.parents():
1544 for node in self.dirstate.parents():
1545 if node == nullid:
1545 if node == nullid:
1546 continue
1546 continue
1547 rev = cl.index.get_rev(node)
1547 rev = cl.index.get_rev(node)
1548 if rev is None:
1548 if rev is None:
1549 # unknown working copy parent case:
1549 # unknown working copy parent case:
1550 #
1550 #
1551 # skip the fast path and let higher code deal with it
1551 # skip the fast path and let higher code deal with it
1552 continue
1552 continue
1553 pair = (rev, node)
1553 pair = (rev, node)
1554 quick[rev] = pair
1554 quick[rev] = pair
1555 quick[node] = pair
1555 quick[node] = pair
1556 # also add the parents of the parents
1556 # also add the parents of the parents
1557 for r in cl.parentrevs(rev):
1557 for r in cl.parentrevs(rev):
1558 if r == nullrev:
1558 if r == nullrev:
1559 continue
1559 continue
1560 n = cl.node(r)
1560 n = cl.node(r)
1561 pair = (r, n)
1561 pair = (r, n)
1562 quick[r] = pair
1562 quick[r] = pair
1563 quick[n] = pair
1563 quick[n] = pair
1564 p1node = self.dirstate.p1()
1564 p1node = self.dirstate.p1()
1565 if p1node != nullid:
1565 if p1node != nullid:
1566 quick[b'.'] = quick[p1node]
1566 quick[b'.'] = quick[p1node]
1567 return quick
1567 return quick
1568
1568
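    # Sketch of the mapping built above (hypothetical revisions and nodes):
    # every key, whether a symbol, a revision number, or a node, resolves to
    # the same (rev, node) pair so lookups can skip full name resolution:
    #
    #   {b'.': (5, p1node), 5: (5, p1node), p1node: (5, p1node),
    #    4: (4, grandparentnode), grandparentnode: (4, grandparentnode)}
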
    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains a list of symbols we can recognise right away without
        further processing.
        """
        mapping = self._quick_access_changeid_null
        if self.filtername in repoview.filter_has_wc:
            mapping = mapping.copy()
            mapping.update(self._quick_access_changeid_wc)
        return mapping

    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in pycompat.xrange(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)

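    # A few lookups accepted by __getitem__ (illustrative sketch,
    # hypothetical ``repo``):
    #
    #   ctx = repo[0]        # integer revision
    #   ctx = repo[b'.']     # working directory parent, via the fast path
    #   ctxs = repo[0:3]     # list of changectx, filtered revisions skipped
    #   wctx = repo[None]    # workingctx for the working directory
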
    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node is
        specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)

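    # Usage sketch for revs() (hypothetical variables): %-formatting escapes
    # arguments per ``revsetlang.formatspec``, e.g. %d for an int, %s for
    # bytes, %ld for a list of ints:
    #
    #   for rev in repo.revs(b'%ld and merge()', candidate_revs):
    #       ...
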
    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

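    # Sketch of anyrevs() with a local alias override (hypothetical alias
    # name and definition):
    #
    #   revs = repo.anyrevs(
    #       [b'not public() and mine'],
    #       user=True,
    #       localalias={b'mine': b'author(alice)'},
    #   )
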
    def url(self):
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in pycompat.iteritems(tags):
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in pycompat.iteritems(alltags):
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value
            for (name, value) in pycompat.iteritems(tagtypes)
        }
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in pycompat.iteritems(self.tags()):
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in pycompat.iteritems(self._tagscache.tags):
                nodetagscache.setdefault(n, []).append(t)
            for tags in pycompat.itervalues(nodetagscache):
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

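    # Sketch of known() (hypothetical nodes): the result is positional, one
    # boolean per input node, with False for unknown or filtered nodes:
    #
    #   repo.known([somenode, unknownnode])  ->  [True, False]
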
    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in pycompat.iteritems(self._datafilters):
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

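    # Illustrative hgrc configuration consumed by _loadfilter() above
    # (hypothetical patterns and commands): [encode] filters are applied by
    # wread(), [decode] filters by wwrite()/wwritedata(); a plain command is
    # run through procutil.filter as a shell pipe:
    #
    #   [encode]
    #   *.txt = dos2unix
    #
    #   [decode]
    #   *.txt = unix2dos
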
    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

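    # Flag semantics in wwrite() (sketch, hypothetical calls): b'l' writes
    # ``data`` as the target of a symlink, b'x' sets the executable bit, an
    # empty flag string writes a plain file:
    #
    #   repo.wwrite(b'script.sh', data, b'x')    # executable file
    #   repo.wwrite(b'link', b'target', b'l')    # symlink to "target"
    #   repo.wwrite(b'doc.txt', data, b'')       # regular file
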
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

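    # Minimal usage sketch for transaction() (hypothetical description
    # string): the store lock must already be held, otherwise the devel
    # checks below raise a ProgrammingError; a second call while a
    # transaction is running simply nests:
    #
    #   with repo.lock():
    #       with repo.transaction(b'my-operation') as tr:
    #           ...  # mutate the store; rolled back if an exception escapes
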
    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs}  # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since this likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature so we disable it by default.
        # The flag will be removed when we are happy with the performance
        # impact.
        #
        # Once this feature is no longer experimental move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        # ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building sets would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args)
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args)
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, b'journal.narrowspec')
                narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, b'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            aftertrans(renames),
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(phases.preparehookargs(node, old, new))
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args)
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            reporef()._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

    def _journalfiles(self):
        return (
            (self.svfs, b'journal'),
            (self.svfs, b'journal.narrowspec'),
            (self.vfs, b'journal.narrowspec.dirstate'),
            (self.vfs, b'journal.dirstate'),
            (self.vfs, b'journal.branch'),
            (self.vfs, b'journal.desc'),
            (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
            (self.svfs, b'journal.phaseroots'),
        )

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, b'journal.dirstate')
        narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, b'journal.narrowspec')
        self.vfs.write(
            b"journal.branch", encoding.fromlocal(self.dirstate.branch())
        )
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        bookmarksvfs.write(
            b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
        )
        self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))

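    # Sketch of the journal.desc content written above (hypothetical values):
    # the pre-transaction changelog length on the first line and the
    # transaction description on the second; _rollback() later parses the
    # renamed undo.desc copy of this file:
    #
    #   42
    #   commit
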
2341 def recover(self):
2341 def recover(self):
2342 with self.lock():
2342 with self.lock():
2343 if self.svfs.exists(b"journal"):
2343 if self.svfs.exists(b"journal"):
2344 self.ui.status(_(b"rolling back interrupted transaction\n"))
2344 self.ui.status(_(b"rolling back interrupted transaction\n"))
2345 vfsmap = {
2345 vfsmap = {
2346 b'': self.svfs,
2346 b'': self.svfs,
2347 b'plain': self.vfs,
2347 b'plain': self.vfs,
2348 }
2348 }
2349 transaction.rollback(
2349 transaction.rollback(
2350 self.svfs,
2350 self.svfs,
2351 vfsmap,
2351 vfsmap,
2352 b"journal",
2352 b"journal",
2353 self.ui.warn,
2353 self.ui.warn,
2354 checkambigfiles=_cachedfiles,
2354 checkambigfiles=_cachedfiles,
2355 )
2355 )
2356 self.invalidate()
2356 self.invalidate()
2357 return True
2357 return True
2358 else:
2358 else:
2359 self.ui.warn(_(b"no interrupted transaction available\n"))
2359 self.ui.warn(_(b"no interrupted transaction available\n"))
2360 return False
2360 return False
2361
2361
2362 def rollback(self, dryrun=False, force=False):
2362 def rollback(self, dryrun=False, force=False):
2363 wlock = lock = dsguard = None
2363 wlock = lock = dsguard = None
2364 try:
2364 try:
2365 wlock = self.wlock()
2365 wlock = self.wlock()
2366 lock = self.lock()
2366 lock = self.lock()
2367 if self.svfs.exists(b"undo"):
2367 if self.svfs.exists(b"undo"):
2368 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2368 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2369
2369
2370 return self._rollback(dryrun, force, dsguard)
2370 return self._rollback(dryrun, force, dsguard)
2371 else:
2371 else:
2372 self.ui.warn(_(b"no rollback information available\n"))
2372 self.ui.warn(_(b"no rollback information available\n"))
2373 return 1
2373 return 1
2374 finally:
2374 finally:
2375 release(dsguard, lock, wlock)
2375 release(dsguard, lock, wlock)
2376
2376
    @unfilteredmethod  # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
        except IOError:
            msg = _(b'rolling back unknown transaction\n')
            desc = None

        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {b'plain': self.vfs, b'': self.svfs}
        transaction.rollback(
            self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
        )
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        if bookmarksvfs.exists(b'undo.bookmarks'):
            bookmarksvfs.rename(
                b'undo.bookmarks', b'bookmarks', checkambig=True
            )
        if self.svfs.exists(b'undo.phaseroots'):
            self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
        self.invalidate()

        has_node = self.changelog.index.has_node
        parentgone = any(not has_node(p) for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, b'undo.narrowspec')
            narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, b'undo.dirstate')
            try:
                branch = self.vfs.read(b'undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(
                    _(
                        b'named branch could not be reset: '
                        b'current branch is still \'%s\'\n'
                    )
                    % self.dirstate.branch()
                )

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
            mergemod.mergestate.clean(self, self[b'.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)

        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)

        return updater

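    # Illustrative sketch (not part of the original source): an extension
    # could augment the cache updater by wrapping this method; the helper
    # names below (warmmycache) are assumptions for illustration:
    #
    #     def wrapped(orig, repo, newtransaction):
    #         updater = orig(repo, newtransaction)
    #
    #         def extupdater(tr):
    #             updater(tr)
    #             warmmycache(repo, tr)  # hypothetical extension cache
    #
    #         return extupdater
    #
    #     extensions.wrapfunction(
    #         localrepo.localrepository, '_buildcacheupdater', wrapped
    #     )
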
    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction has closed, the
        transaction will be available in the 'tr' argument. This can be used
        to selectively update caches relevant to the changes in that
        transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid but
            # a later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes[b'origrepolen'] < len(self):
            # accessing the 'served' branchmap should refresh all the others,
            self.ui.debug(b'updating the branch cache\n')
            self.filtered(b'served').branchmap()
            self.filtered(b'served.hidden').branchmap()

        if full:
            unfi = self.unfiltered()

            self.changelog.update_caches(transaction=tr)
            self.manifestlog.update_caches(transaction=tr)

            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

            # accessing fnode cache warms the cache
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
            # accessing tags warms the cache
            self.tags()
            self.filtered(b'served').tags()

            # The `full` arg is documented as updating even the lazily-loaded
            # caches immediately, so we're forcing a write to cause these
            # caches to be warmed up even if they haven't explicitly been
            # requested yet (if they've never been used by hg, they won't
            # ever have been written, even if they're a subset of another
            # kind of cache that *has* been used).
            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                filtered.branchmap().write(filtered)

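    # Illustrative usage (not part of the original source): the 'full' branch
    # above is what `hg debugupdatecaches` exercises; warming everything by
    # hand would look roughly like
    #
    #     with repo.wlock(), repo.lock():
    #         repo.updatecaches(full=True)
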
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        self._quick_access_changeid_invalidate()

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == b'dirstate':
                continue
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog._delayed
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(
        self,
        vfs,
        lockname,
        wait,
        releasefn,
        acquirefn,
        desc,
        inheritchecker=None,
        parentenvvar=None,
    ):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint(b"ui", b"timeout")
            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')

        l = lockmod.trylock(
            self.ui,
            vfs,
            lockname,
            timeout,
            warntimeout,
            releasefn=releasefn,
            acquirefn=acquirefn,
            desc=desc,
            inheritchecker=inheritchecker,
            parentlock=parentlock,
            signalsafe=signalsafe,
        )
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else:  # no lock has been found.
            callback(True)

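    # Illustrative sketch (not part of the original source): deferring work
    # until every lock is dropped; the callback receives a boolean indicating
    # whether the locked operation succeeded:
    #
    #     def notify(success):
    #         if success:
    #             repo.ui.status(b'all locks released\n')
    #
    #     repo._afterlock(notify)
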
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                b'wlock cannot be inherited in the middle of a transaction'
            )

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache[b'dirstate'].refresh()

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
            inheritchecker=self._wlockchecktransaction,
            parentenvvar=b'HG_WLOCK_LOCKER',
        )
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(
        self,
        fctx,
        manifest1,
        manifest2,
        linkrev,
        tr,
        changelist,
        includecopymeta,
    ):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug(b'reusing %s filelog entry\n' % fname)
                if (
                    fparent1 != nullid
                    and manifest1.flags(fname) != fctx.flags()
                ) or (
                    fparent2 != nullid
                    and manifest2.flags(fname) != fctx.flags()
                ):
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        cfname = fctx.copysource()
        if cfname and cfname != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cnode = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2:  # branch merge
                if fparent2 == nullid or cnode is None:  # copied on remote side
                    if cfname in manifest2:
                        cnode = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if cnode:
                self.ui.debug(
                    b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode))
                )
                if includecopymeta:
                    meta[b"copy"] = cfname
                    meta[b"copyrev"] = hex(cnode)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(
                    _(
                        b"warning: can't find ancestor for '%s' "
                        b"copied from '%s'!\n"
                    )
                    % (fname, cfname)
                )

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid
            elif not fparentancestors:
                # TODO: this whole if-else might be simplified much more
                ms = mergemod.mergestate.read(self)
                if (
                    fname in ms
                    and ms[fname] == mergemod.MERGE_RECORD_MERGED_OTHER
                ):
                    fparent1, fparent2 = fparent2, nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def checkcommitpatterns(self, wctx, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == b'.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _(b'file not found!'))
                # Is it a directory that exists or used to exist?
                if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                    d = f + b'/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _(b"no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _(b"file not tracked!"))

    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=None,
        extra=None,
    ):
        """Add a new revision to the current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            # internal config: ui.allowemptycommit
            allowemptycommit = (
                wctx.branch() != wctx.p1().branch()
                or extra.get(b'close')
                or merge
                or cctx.files()
                or self.ui.configbool(b'ui', b'allowemptycommit')
            )
            if not allowemptycommit:
                self.ui.debug(b'nothing to commit, clearing merge state\n')
                ms.reset()
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msgfn
                    )
                    self.ui.write(
                        _(
                            b"note: use 'hg commit --logfile "
                            b".hg/last-message.txt --edit' to reuse it\n"
                        )
                    )
                raise

        def commithook(unused_success):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may already have been stripped before the
            # hook is released
            if self.changelog.hasnode(ret):
                self.hook(
                    b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                )

        self._afterlock(commithook)
        return ret

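    # Illustrative usage (not part of the original source): a minimal
    # programmatic commit of all working directory changes might be
    #
    #     node = repo.commit(text=b'example message', user=b'alice <a@b.c>')
    #     if node is None:
    #         repo.ui.status(b'nothing changed\n')
    #
    # commit() returns None when there is nothing to commit (unless
    # ui.allowemptycommit is set).
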
    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        """Add a new revision to the current repository.
        Revision information is passed via the context argument.

        ctx.files() should list all files involved in this commit, i.e.
        modified/added/removed files. On merge, it may be wider than the
        ctx.files() to be committed, since any file nodes derived directly
        from p1 or p2 are excluded from the committed ctx.files().

        origctx is for convert to work around the problem that bug
        fixes to the files list in changesets change hashes. For
        convert to be the identity, it can pass an origctx and this
        function will use the same files list when it makes sense to
        do so.
        """

        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        if self.filecopiesmode == b'changeset-sidedata':
            writechangesetcopy = True
            writefilecopymeta = True
            writecopiesto = None
        else:
            writecopiesto = self.ui.config(b'experimental', b'copies.write-to')
            writefilecopymeta = writecopiesto != b'changeset-only'
            writechangesetcopy = writecopiesto in (
                b'changeset-only',
                b'compatibility',
            )
        p1copies, p2copies = None, None
        if writechangesetcopy:
            p1copies = ctx.p1copies()
            p2copies = ctx.p2copies()
        filesadded, filesremoved = None, None
        with self.lock(), self.transaction(b"commit") as tr:
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                self.ui.debug(b'reusing known manifest\n')
                mn = ctx.manifestnode()
                files = ctx.files()
                if writechangesetcopy:
                    filesadded = ctx.filesadded()
                    filesremoved = ctx.filesremoved()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_(b"committing files:\n"))
                uipathfn = scmutil.getuipathfn(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(uipathfn(f) + b"\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(
                                fctx,
                                m1,
                                m2,
                                linkrev,
                                trp,
                                changed,
                                writefilecopymeta,
                            )
                            m.setflag(f, fctx.flags())
                    except OSError:
                        self.ui.warn(
                            _(b"trouble committing %s!\n") % uipathfn(f)
                        )
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(
                                _(b"trouble committing %s!\n") % uipathfn(f)
                            )
                        raise

                # update manifest
                removed = [f for f in removed if f in m1 or f in m2]
                drop = sorted([f for f in removed if f in m])
                for f in drop:
                    del m[f]
                if p2.rev() != nullrev:

                    @util.cachefunc
                    def mas():
                        p1n = p1.node()
                        p2n = p2.node()
                        cahs = self.changelog.commonancestorsheads(p1n, p2n)
                        if not cahs:
                            cahs = [nullrev]
                        return [self[r].manifest() for r in cahs]

                    def deletionfromparent(f):
                        # When a file is removed relative to p1 in a merge,
                        # this function determines whether the absence is due
                        # to a deletion from a parent, or whether the merge
                        # commit itself deletes the file. We decide this by
                        # doing a simplified three way merge of the manifest
                        # entry for the file. There are two ways we decide the
                        # merge itself didn't delete a file:
                        # - neither parent (nor the merge) contains the file
                        # - exactly one parent contains the file, and that
                        #   parent has the same filelog entry as the merge
                        #   ancestor (or all of them if there are two). In
                        #   other words, that parent left the file unchanged
                        #   while the other one deleted it.
                        # One way to think about this is that deleting a file
                        # is similar to emptying it, so the list of changed
                        # files should be similar either way. The computation
                        # described above is not done directly in _filecommit
                        # when creating the list of changed files, however
                        # it does something very similar by comparing filelog
                        # nodes.
                        if f in m1:
                            return f not in m2 and all(
                                f in ma and ma.find(f) == m1.find(f)
                                for ma in mas()
                            )
                        elif f in m2:
                            return all(
                                f in ma and ma.find(f) == m2.find(f)
                                for ma in mas()
                            )
                        else:
                            return True

                    removed = [f for f in removed if not deletionfromparent(f)]

                files = changed + removed
                md = None
                if not files:
                    # if no "files" actually changed in terms of the changelog,
                    # try hard to detect unmodified manifest entry so that the
                    # exact same commit can be reproduced later on convert.
                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
                    if not files and md:
                        self.ui.debug(
                            b'not reusing manifest (no file change in '
                            b'changelog, but manifest differs)\n'
                        )
                if files or md:
                    self.ui.note(_(b"committing manifest\n"))
                    # we're using narrowmatch here since it's already applied
                    # at other stages (such as dirstate.walk), so we're already
                    # ignoring things outside of narrowspec in most cases. The
                    # one case where we might have files outside the narrowspec
                    # at this point is merges, and we already error out in the
                    # case where the merge has files outside of the narrowspec,
                    # so this is safe.
                    mn = mctx.write(
                        trp,
                        linkrev,
                        p1.manifestnode(),
                        p2.manifestnode(),
                        added,
                        drop,
                        match=self.narrowmatch(),
                    )

                    if writechangesetcopy:
                        filesadded = [
                            f for f in changed if not (f in m1 or f in m2)
                        ]
                        filesremoved = removed
                else:
                    self.ui.debug(
                        b'reusing manifest from p1 (listed files '
                        b'actually unchanged)\n'
                    )
                    mn = p1.manifestnode()
            else:
                self.ui.debug(b'reusing manifest from p1 (no file change)\n')
                mn = p1.manifestnode()
                files = []

            if writecopiesto == b'changeset-only':
                # If writing only to changeset extras, use None to indicate
                # that no entry should be written. If writing to both, write
                # an empty entry to prevent the reader from falling back to
                # reading filelogs.
                p1copies = p1copies or None
                p2copies = p2copies or None
                filesadded = filesadded or None
                filesremoved = filesremoved or None

            if origctx and origctx.manifestnode() == mn:
                files = origctx.files()

            # update changelog
            self.ui.note(_(b"committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(
                mn,
                files,
                ctx.description(),
                trp,
                p1.node(),
                p2.node(),
                user,
                ctx.date(),
                ctx.extra().copy(),
                p1copies,
                p2copies,
                filesadded,
                filesremoved,
            )
            xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
            self.hook(
                b'pretxncommit',
                throw=True,
                node=hex(n),
                parent1=xp1,
                parent2=xp2,
            )
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter the parent
                # changesets: if a parent already has a higher phase, the
                # resulting phase will be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
        return n

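    # Illustrative sketch (not part of the original source): commitctx() also
    # serves in-memory commits built with context.memctx; a rough example,
    # with file names, message, and data made up for illustration:
    #
    #     def getfile(repo, memctx, path):
    #         return context.memfilectx(repo, memctx, path, b'contents\n')
    #
    #     mctx = context.memctx(
    #         repo,
    #         (repo[b'.'].node(), nullid),
    #         b'example message',
    #         [b'example.txt'],
    #         getfile,
    #         user=b'alice <a@b.c>',
    #     )
    #     node = repo.commitctx(mctx)
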
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

3331 def status(
3332 def status(
3332 self,
3333 self,
3333 node1=b'.',
3334 node1=b'.',
3334 node2=None,
3335 node2=None,
3335 match=None,
3336 match=None,
3336 ignored=False,
3337 ignored=False,
3337 clean=False,
3338 clean=False,
3338 unknown=False,
3339 unknown=False,
3339 listsubrepos=False,
3340 listsubrepos=False,
3340 ):
3341 ):
3341 '''a convenience method that calls node1.status(node2)'''
3342 '''a convenience method that calls node1.status(node2)'''
3342 return self[node1].status(
3343 return self[node1].status(
3343 node2, match, ignored, clean, unknown, listsubrepos
3344 node2, match, ignored, clean, unknown, listsubrepos
3344 )
3345 )
3345
3346
3346 def addpostdsstatus(self, ps):
3347 def addpostdsstatus(self, ps):
3347 """Add a callback to run within the wlock, at the point at which status
3348 """Add a callback to run within the wlock, at the point at which status
3348 fixups happen.
3349 fixups happen.
3349
3350
3350 On status completion, callback(wctx, status) will be called with the
3351 On status completion, callback(wctx, status) will be called with the
3351 wlock held, unless the dirstate has changed from underneath or the wlock
3352 wlock held, unless the dirstate has changed from underneath or the wlock
3352 couldn't be grabbed.
3353 couldn't be grabbed.
3353
3354
3354 Callbacks should not capture and use a cached copy of the dirstate --
3355 Callbacks should not capture and use a cached copy of the dirstate --
3355 it might change in the meanwhile. Instead, they should access the
3356 it might change in the meanwhile. Instead, they should access the
3356 dirstate via wctx.repo().dirstate.
3357 dirstate via wctx.repo().dirstate.
3357
3358
3358 This list is emptied out after each status run -- extensions should
3359 This list is emptied out after each status run -- extensions should
3359 make sure it adds to this list each time dirstate.status is called.
3360 make sure it adds to this list each time dirstate.status is called.
3360 Extensions should also make sure they don't call this for statuses
3361 Extensions should also make sure they don't call this for statuses
3361 that don't involve the dirstate.
3362 that don't involve the dirstate.
3362 """
3363 """
3363
3364
3364 # The list is located here for uniqueness reasons -- it is actually
3365 # The list is located here for uniqueness reasons -- it is actually
3365 # managed by the workingctx, but that isn't unique per-repo.
3366 # managed by the workingctx, but that isn't unique per-repo.
3366 self._postdsstatus.append(ps)
3367 self._postdsstatus.append(ps)
3367
3368
3368 def postdsstatus(self):
3369 def postdsstatus(self):
3369 """Used by workingctx to get the list of post-dirstate-status hooks."""
3370 """Used by workingctx to get the list of post-dirstate-status hooks."""
3370 return self._postdsstatus
3371 return self._postdsstatus
3371
3372
3372 def clearpostdsstatus(self):
3373 def clearpostdsstatus(self):
3373 """Used by workingctx to clear post-dirstate-status hooks."""
3374 """Used by workingctx to clear post-dirstate-status hooks."""
3374 del self._postdsstatus[:]
3375 del self._postdsstatus[:]
3375
3376
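    # Illustrative sketch (not part of this class): registering a
    # post-dirstate-status fixup via addpostdsstatus() above. The callback
    # name is hypothetical; it receives the workingctx and a status object
    # and runs under wlock, so it must re-read state via wctx.repo().
    #
    #     def _fixup(wctx, status):
    #         repo = wctx.repo()
    #         repo.ui.debug(b'%d modified files\n' % len(status.modified))
    #
    #     repo.addpostdsstatus(_fixup)
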
    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

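    # Illustrative sketch (not part of this class): between() samples nodes
    # at exponentially growing distances (1, 2, 4, ...) while walking first
    # parents from top towards bottom. On a linear chain of integers the
    # same loop looks like this (a hypothetical stand-in for changelog
    # nodes):
    #
    #     def sample(top, bottom):
    #         n, l, i, f = top, [], 0, 1
    #         while n != bottom:
    #             if i == f:
    #                 l.append(n)
    #                 f *= 2
    #             n -= 1          # "first parent" on a linear chain
    #             i += 1
    #         return l
    #
    #     sample(10, 0)  # -> [9, 8, 6, 2]: distances 1, 2, 4, 8 from top
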
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object that is called with a pushop (carrying
        repo, remote, and outgoing attributes) before changesets are pushed.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook(unused_success):
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values

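    # Illustrative usage sketch (not part of this class): pushkey namespaces
    # as exposed on a repo object. `repo`, `old_hex` and `new_hex` are
    # hypothetical stand-ins; bookmarks are one of the standard namespaces.
    #
    #     marks = repo.listkeys(b'bookmarks')         # {name: hex node}
    #     ok = repo.pushkey(b'bookmarks', b'stable', old_hex, new_hex)
    #     # ok is False when the prepushkey hook aborted the update
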
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def savecommitmessage(self, text):
        fp = self.vfs(b'last-message.txt', b'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1 :])


# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]

    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError:  # journal file does not yet exist
                pass

    return a


def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))


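# Illustrative sketch (not part of the original module): undoname() maps the
# journal file names written during a transaction to their post-transaction
# "undo" counterparts; aftertrans() above schedules the actual renames.
def _demo_undoname():
    # the paths are hypothetical examples
    assert undoname(b'/repo/.hg/store/journal') == b'/repo/.hg/store/undo'
    assert undoname(b'/repo/.hg/journal.dirstate') == b'/repo/.hg/undo.dirstate'

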
def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)


def islocal(path):
    return True


def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts


def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if b'sharedrepo' in createopts:
        requirements = set(createopts[b'sharedrepo'].requirements)
        if createopts.get(b'sharedrelative'):
            requirements.add(b'relshared')
        else:
            requirements.add(b'shared')

        return requirements

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {b'revlogv1'}
    if ui.configbool(b'format', b'usestore'):
        requirements.add(b'store')
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(b'fncache')
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(b'dotencode')

    compengines = ui.configlist(b'format', b'revlog-compression')
    for compengine in compengines:
        if compengine in util.compengines:
            break
    else:
        raise error.Abort(
            _(
                b'compression engines %s defined by '
                b'format.revlog-compression not available'
            )
            % b', '.join(b'"%s"' % e for e in compengines),
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(b'generaldelta')
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.exp-use-side-data
    if ui.configbool(b'format', b'exp-use-side-data'):
        requirements.add(SIDEDATA_REQUIREMENT)
    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.add(SIDEDATA_REQUIREMENT)
        requirements.add(COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(b'treemanifest')

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.remove(b'revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard(b'generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'internal-phase'):
        requirements.add(b'internal-phase')

    if createopts.get(b'narrowfiles'):
        requirements.add(repository.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

    return requirements


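# Illustrative sketch (not part of the original module): an extension can
# wrap newreporequirements() to inject its own requirement. The extension,
# config knob and requirement names below are hypothetical.
#
#     from mercurial import extensions, localrepo
#
#     def _newreporequirements(orig, ui, createopts):
#         requirements = orig(ui, createopts)
#         if ui.configbool(b'myext', b'enable-feature'):
#             requirements.add(b'exp-myext-feature')
#         return requirements
#
#     def extsetup(ui):
#         extensions.wrapfunction(
#             localrepo, 'newreporequirements', _newreporequirements
#         )

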
def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        b'backend',
        b'lfs',
        b'narrowfiles',
        b'sharedrepo',
        b'sharedrelative',
        b'shareditems',
        b'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}


def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    The following keys for ``createopts`` are recognized:

    backend
        The storage backend to use.
    lfs
        Repository will be created with ``lfs`` requirement. The lfs extension
        will automatically be loaded when the repository is accessed.
    narrowfiles
        Set up repository to support narrow file storage.
    sharedrepo
        Repository object from which storage should be shared.
    sharedrelative
        Boolean indicating if the path to the shared repo should be
        stored as relative. By default, the pointer to the "parent" repo
        is stored as an absolute path.
    shareditems
        Set of items to share to the new repository (in addition to storage).
    shallowfilestore
        Indicates that storage for files should be shallow (not all ancestor
        revisions are known).
    """
    createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError(
            b'filterknowncreateopts() did not return a dict'
        )

    if unknownopts:
        raise error.Abort(
            _(
                b'unable to create repository because of unknown '
                b'creation option: %s'
            )
            % b', '.join(sorted(unknownopts)),
            hint=_(b'is a required extension not loaded?'),
        )

    requirements = newreporequirements(ui, createopts=createopts)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_(b'repository %s already exists') % path)

    if b'sharedrepo' in createopts:
        sharedpath = createopts[b'sharedrepo'].sharedpath

        if createopts.get(b'sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
    hgvfs.mkdir(b'wcache')

    if b'store' in requirements and b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(
            b'00changelog.i',
            b'\0\0\0\2 dummy changelog to prevent using the old repo '
            b'layout',
        )

    scmutil.writerequires(hgvfs, requirements)

    # Write out file telling readers where to find the shared store.
    if b'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get(b'shareditems'):
        shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)


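# Illustrative usage sketch (not part of the original module), assuming this
# module is mercurial.localrepo and a ui instance is at hand; the target
# path is hypothetical:
#
#     from mercurial import ui as uimod, localrepo
#
#     u = uimod.ui.load()
#     localrepo.createrepository(u, b'/tmp/demo-repo')
#     repo = localrepo.instance(u, b'/tmp/demo-repo', create=False)
#     # equivalent one-step form: localrepo.instance(u, path, create=True)

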
def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # has all attribute lookups result in error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError(
                b'repo instances should not be used after unshare'
            )

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, '__class__', poisonedrepository)
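

# Illustrative sketch (not part of the original module): the same
# class-swapping trick in miniature. Swapping __class__ via
# object.__setattr__ bypasses any __setattr__ interception (e.g. a repoview)
# and makes every subsequent attribute lookup fail.
def _demo_poison():
    class live(object):
        def ping(self):
            return b'pong'

    class poisoned(object):
        def __getattribute__(self, item):
            raise RuntimeError('poisoned object used')

    obj = live()
    assert obj.ping() == b'pong'
    object.__setattr__(obj, '__class__', poisoned)
    # obj.ping() now raises RuntimeError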
@@ -1,2301 +1,2304
# manifest.py - manifest revision class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import heapq
import itertools
import struct
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
)
from .pycompat import getattr
from . import (
    encoding,
    error,
    match as matchmod,
    mdiff,
    pathutil,
    policy,
    pycompat,
    revlog,
    util,
)
from .interfaces import (
    repository,
    util as interfaceutil,
)

parsers = policy.importmod('parsers')
propertycache = util.propertycache

# Allow tests to more easily test the alternate path in manifestdict.fastdelta()
FASTDELTA_TEXTDIFF_THRESHOLD = 1000


def _parse(data):
    # This method does a little bit of excessive-looking
    # precondition checking. This is so that the behavior of this
    # class exactly matches its C counterpart to try and help
    # prevent surprise breakage for anyone that develops against
    # the pure version.
    if data and data[-1:] != b'\n':
        raise ValueError(b'Manifest did not end in a newline.')
    prev = None
    for l in data.splitlines():
        if prev is not None and prev > l:
            raise ValueError(b'Manifest lines not in sorted order.')
        prev = l
        f, n = l.split(b'\0')
        nl = len(n)
        if 64 < nl:
            # modern hash, full width
            yield f, bin(n[:64]), n[64:]
        elif 40 < nl < 45:
            # legacy hash, always sha1
            yield f, bin(n[:40]), n[40:]
        else:
            yield f, bin(n), b''


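# Illustrative sketch (not part of the original module): what _parse() above
# consumes. Each manifest line is "<path>\0<hex node>[<flags>]\n", sorted by
# path; the hashes below are made up.
def _demo_parse():
    sampledata = (
        b'bar/baz.txt\x00' + b'aa' * 20 + b'\n'    # plain entry, no flags
        + b'foo.txt\x00' + b'bb' * 20 + b'x\n'     # entry with executable flag
    )
    return [(f, hex(n), fl) for f, n, fl in _parse(sampledata)]
    # -> [(b'bar/baz.txt', b'aa...aa', b''), (b'foo.txt', b'bb...bb', b'x')]

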
def _text(it):
    files = []
    lines = []
    for f, n, fl in it:
        files.append(f)
        # if this is changed to support newlines in filenames,
        # be sure to check the templates/ dir again (especially *-raw.tmpl)
        lines.append(b"%s\0%s%s\n" % (f, hex(n), fl))

    _checkforbidden(files)
    return b''.join(lines)


class lazymanifestiter(object):
    def __init__(self, lm):
        self.pos = 0
        self.lm = lm

    def __iter__(self):
        return self

    def next(self):
        try:
            data, pos = self.lm._get(self.pos)
        except IndexError:
            raise StopIteration
        if pos == -1:
            self.pos += 1
            return data[0]
        self.pos += 1
        zeropos = data.find(b'\x00', pos)
        return data[pos:zeropos]

    __next__ = next


class lazymanifestiterentries(object):
    def __init__(self, lm):
        self.lm = lm
        self.pos = 0

    def __iter__(self):
        return self

    def next(self):
        try:
            data, pos = self.lm._get(self.pos)
        except IndexError:
            raise StopIteration
        if pos == -1:
            self.pos += 1
            return data
        zeropos = data.find(b'\x00', pos)
        hashval = unhexlify(data, self.lm.extrainfo[self.pos], zeropos + 1, 40)
        flags = self.lm._getflags(data, self.pos, zeropos)
        self.pos += 1
        return (data[pos:zeropos], hashval, flags)

    __next__ = next


def unhexlify(data, extra, pos, length):
    s = bin(data[pos : pos + length])
    if extra:
        # bytechr instead of chr so the extra byte stays bytes on Python 3
        s += pycompat.bytechr(extra & 0xFF)
    return s


def _cmp(a, b):
    return (a > b) - (a < b)


class _lazymanifest(object):
    """A pure python manifest backed by a byte string. It is supplemented with
    internal lists as it is modified, until it is compacted back to a pure byte
    string.

    ``data`` is the initial manifest data.

    ``positions`` is a list of offsets, one per manifest entry. Positive
    values are offsets into ``data``, negative values are offsets into the
    ``extradata`` list. When an entry is removed, its entry is dropped from
    ``positions``. The values are encoded such that when walking the list and
    indexing into ``data`` or ``extradata`` as appropriate, the entries are
    sorted by filename.

    ``extradata`` is a list of (key, hash, flags) for entries that were added or
    modified since the manifest was created or compacted.
    """

    def __init__(
        self,
        data,
        positions=None,
        extrainfo=None,
        extradata=None,
        hasremovals=False,
    ):
        if positions is None:
            self.positions = self.findlines(data)
            self.extrainfo = [0] * len(self.positions)
            self.data = data
            self.extradata = []
            self.hasremovals = False
        else:
            self.positions = positions[:]
            self.extrainfo = extrainfo[:]
            self.extradata = extradata[:]
            self.data = data
            self.hasremovals = hasremovals

    def findlines(self, data):
        if not data:
            return []
        pos = data.find(b"\n")
        if pos == -1 or data[-1:] != b'\n':
            raise ValueError(b"Manifest did not end in a newline.")
        positions = [0]
        prev = data[: data.find(b'\x00')]
        while pos < len(data) - 1 and pos != -1:
            positions.append(pos + 1)
            nexts = data[pos + 1 : data.find(b'\x00', pos + 1)]
            if nexts < prev:
                raise ValueError(b"Manifest lines not in sorted order.")
            prev = nexts
            pos = data.find(b"\n", pos + 1)
        return positions

    def _get(self, index):
        # get the position encoded in pos:
        # positive number is an index in 'data'
        # negative number is in extrapieces
        pos = self.positions[index]
        if pos >= 0:
            return self.data, pos
        return self.extradata[-pos - 1], -1

    def _getkey(self, pos):
        if pos >= 0:
            return self.data[pos : self.data.find(b'\x00', pos + 1)]
        return self.extradata[-pos - 1][0]

    def bsearch(self, key):
        first = 0
        last = len(self.positions) - 1

        while first <= last:
            midpoint = (first + last) // 2
            nextpos = self.positions[midpoint]
            candidate = self._getkey(nextpos)
            r = _cmp(key, candidate)
            if r == 0:
                return midpoint
            else:
                if r < 0:
                    last = midpoint - 1
                else:
                    first = midpoint + 1
        return -1

    def bsearch2(self, key):
        # same as the above, but will always return the position
        # done for performance reasons
        first = 0
        last = len(self.positions) - 1

        while first <= last:
            midpoint = (first + last) // 2
            nextpos = self.positions[midpoint]
            candidate = self._getkey(nextpos)
            r = _cmp(key, candidate)
            if r == 0:
                return (midpoint, True)
            else:
                if r < 0:
                    last = midpoint - 1
                else:
                    first = midpoint + 1
        return (first, False)

    def __contains__(self, key):
        return self.bsearch(key) != -1

    def _getflags(self, data, needle, pos):
        start = pos + 41
        end = data.find(b"\n", start)
        if end == -1:
            end = len(data) - 1
        if start == end:
            return b''
        return data[start:end]

    def __getitem__(self, key):
        if not isinstance(key, bytes):
            raise TypeError(b"getitem: manifest keys must be bytes.")
        needle = self.bsearch(key)
        if needle == -1:
            raise KeyError
        data, pos = self._get(needle)
        if pos == -1:
            return (data[1], data[2])
        zeropos = data.find(b'\x00', pos)
        nlpos = data.find(b'\n', zeropos)
        assert 0 <= needle <= len(self.positions)
        assert len(self.extrainfo) == len(self.positions)
        hlen = nlpos - zeropos - 1
        # Hashes sometimes have an extra byte tucked on the end, so
        # detect that.
        if hlen % 2:
            hlen -= 1
        hashval = unhexlify(data, self.extrainfo[needle], zeropos + 1, hlen)
        flags = self._getflags(data, needle, zeropos)
        return (hashval, flags)

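    # Illustrative aside (not part of this class): bsearch2() above returns
    # (index, found) like the standard bisect module's insertion point. On a
    # plain sorted list the equivalent is:
    #
    #     import bisect
    #
    #     def bsearch2(keys, key):
    #         i = bisect.bisect_left(keys, key)
    #         return (i, i < len(keys) and keys[i] == key)
    #
    #     bsearch2([b'a', b'c'], b'b')  # -> (1, False): insert before b'c'
    #     bsearch2([b'a', b'c'], b'c')  # -> (1, True)
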
    def __delitem__(self, key):
        needle, found = self.bsearch2(key)
        if not found:
            raise KeyError
        cur = self.positions[needle]
        self.positions = self.positions[:needle] + self.positions[needle + 1 :]
        self.extrainfo = self.extrainfo[:needle] + self.extrainfo[needle + 1 :]
        if cur >= 0:
            # This does NOT unsort the list as far as the search functions are
            # concerned, as they only examine lines mapped by self.positions.
            self.data = self.data[:cur] + b'\x00' + self.data[cur + 1 :]
            self.hasremovals = True

    def __setitem__(self, key, value):
        if not isinstance(key, bytes):
            raise TypeError(b"setitem: manifest keys must be a byte string.")
        if not isinstance(value, tuple) or len(value) != 2:
            raise TypeError(
                b"Manifest values must be a tuple of (node, flags)."
            )
        hashval = value[0]
        # hashes are either 20 or 32 bytes (sha1 or its replacement),
        # and allow one extra byte that won't be persisted to disk but
        # is sometimes used in memory.
        if not isinstance(hashval, bytes) or not (
            20 <= len(hashval) <= 22 or 32 <= len(hashval) <= 34
        ):
            raise TypeError(b"node must be a 20-byte or 32-byte byte string")
        flags = value[1]
        if len(hashval) == 22:
            hashval = hashval[:-1]
        if not isinstance(flags, bytes) or len(flags) > 1:
            raise TypeError(
                b"flags must be a 0 or 1 byte string, got %r" % (flags,)
            )
        needle, found = self.bsearch2(key)
        if found:
            # put the item
            pos = self.positions[needle]
            if pos < 0:
                self.extradata[-pos - 1] = (key, hashval, value[1])
            else:
                # just don't bother
                self.extradata.append((key, hashval, value[1]))
                self.positions[needle] = -len(self.extradata)
        else:
            # not found, put it in with extra positions
            self.extradata.append((key, hashval, value[1]))
            self.positions = (
                self.positions[:needle]
                + [-len(self.extradata)]
                + self.positions[needle:]
            )
            self.extrainfo = (
                self.extrainfo[:needle] + [0] + self.extrainfo[needle:]
            )

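    # Illustrative aside (not part of this class): how __setitem__ above
    # encodes new entries. A position p >= 0 is an offset into ``data``;
    # p < 0 means ``extradata[-p - 1]``, so appending an entry and storing
    # -len(extradata) always round-trips:
    #
    #     extradata = []
    #     extradata.append((b'foo.txt', b'\x11' * 20, b''))
    #     p = -len(extradata)                 # -> -1
    #     assert extradata[-p - 1][0] == b'foo.txt'
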
340 def copy(self):
340 def copy(self):
341 # XXX call _compact like in C?
341 # XXX call _compact like in C?
342 return _lazymanifest(
342 return _lazymanifest(
343 self.data,
343 self.data,
344 self.positions,
344 self.positions,
345 self.extrainfo,
345 self.extrainfo,
346 self.extradata,
346 self.extradata,
347 self.hasremovals,
347 self.hasremovals,
348 )
348 )
349
349
350 def _compact(self):
350 def _compact(self):
351 # hopefully not called TOO often
351 # hopefully not called TOO often
352 if len(self.extradata) == 0 and not self.hasremovals:
352 if len(self.extradata) == 0 and not self.hasremovals:
353 return
353 return
354 l = []
354 l = []
355 i = 0
355 i = 0
356 offset = 0
356 offset = 0
357 self.extrainfo = [0] * len(self.positions)
357 self.extrainfo = [0] * len(self.positions)
358 while i < len(self.positions):
358 while i < len(self.positions):
359 if self.positions[i] >= 0:
359 if self.positions[i] >= 0:
360 cur = self.positions[i]
360 cur = self.positions[i]
361 last_cut = cur
361 last_cut = cur
362
362
363 # Collect all contiguous entries in the buffer at the current
363 # Collect all contiguous entries in the buffer at the current
364 # offset, breaking out only for added/modified items held in
364 # offset, breaking out only for added/modified items held in
365 # extradata, or a deleted line prior to the next position.
365 # extradata, or a deleted line prior to the next position.
366 while True:
366 while True:
367 self.positions[i] = offset
367 self.positions[i] = offset
368 i += 1
368 i += 1
369 if i == len(self.positions) or self.positions[i] < 0:
369 if i == len(self.positions) or self.positions[i] < 0:
370 break
370 break
371
371
372 # A removed file has no positions[] entry, but does have an
372 # A removed file has no positions[] entry, but does have an
373 # overwritten first byte. Break out and find the end of the
373 # overwritten first byte. Break out and find the end of the
374 # current good entry/entries if there is a removed file
374 # current good entry/entries if there is a removed file
375 # before the next position.
375 # before the next position.
376 if (
376 if (
377 self.hasremovals
377 self.hasremovals
378 and self.data.find(b'\n\x00', cur, self.positions[i])
378 and self.data.find(b'\n\x00', cur, self.positions[i])
379 != -1
379 != -1
380 ):
380 ):
381 break
381 break
382
382
383 offset += self.positions[i] - cur
383 offset += self.positions[i] - cur
384 cur = self.positions[i]
384 cur = self.positions[i]
385 end_cut = self.data.find(b'\n', cur)
385 end_cut = self.data.find(b'\n', cur)
386 if end_cut != -1:
386 if end_cut != -1:
387 end_cut += 1
387 end_cut += 1
388 offset += end_cut - cur
388 offset += end_cut - cur
389 l.append(self.data[last_cut:end_cut])
389 l.append(self.data[last_cut:end_cut])
390 else:
390 else:
391 while i < len(self.positions) and self.positions[i] < 0:
391 while i < len(self.positions) and self.positions[i] < 0:
392 cur = self.positions[i]
392 cur = self.positions[i]
393 t = self.extradata[-cur - 1]
393 t = self.extradata[-cur - 1]
394 l.append(self._pack(t))
394 l.append(self._pack(t))
395 self.positions[i] = offset
395 self.positions[i] = offset
396 # Hashes are either 20 bytes (old sha1s) or 32
396 # Hashes are either 20 bytes (old sha1s) or 32
397 # bytes (new non-sha1).
397 # bytes (new non-sha1).
398 hlen = 20
398 hlen = 20
399 if len(t[1]) > 25:
399 if len(t[1]) > 25:
400 hlen = 32
400 hlen = 32
401 if len(t[1]) > hlen:
401 if len(t[1]) > hlen:
402 self.extrainfo[i] = ord(t[1][hlen + 1])
402 self.extrainfo[i] = ord(t[1][hlen + 1])
403 offset += len(l[-1])
403 offset += len(l[-1])
404 i += 1
404 i += 1
405 self.data = b''.join(l)
405 self.data = b''.join(l)
406 self.hasremovals = False
406 self.hasremovals = False
407 self.extradata = []
407 self.extradata = []
408
408
409 def _pack(self, d):
409 def _pack(self, d):
410 n = d[1]
410 n = d[1]
411 if len(n) == 21 or len(n) == 33:
411 if len(n) == 21 or len(n) == 33:
412 n = n[:-1]
412 n = n[:-1]
413 assert len(n) == 20 or len(n) == 32
413 assert len(n) == 20 or len(n) == 32
414 return d[0] + b'\x00' + hex(n) + d[2] + b'\n'
414 return d[0] + b'\x00' + hex(n) + d[2] + b'\n'
415
415
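    # Editor's illustration (not part of manifest.py): the line layout that
    # _pack() above produces. Each manifest entry is
    # "<path>\0<hex node><flags>\n", where the node is a 20-byte sha1 (or a
    # 32-byte hash) hex-encoded by hex(), and flags is b'', b'x', b'l' or
    # b't'. A minimal sketch, assuming hex() behaves like binascii.hexlify:
    #
    #   >>> from binascii import hexlify as hex
    #   >>> d = (b'foo/bar.py', b'\xaa' * 20, b'x')  # (path, node, flags)
    #   >>> d[0] + b'\x00' + hex(d[1]) + d[2] + b'\n' == (
    #   ...     b'foo/bar.py\x00' + b'aa' * 20 + b'x\n')
    #   True
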
    def text(self):
        self._compact()
        return self.data

    def diff(self, m2, clean=False):
        '''Finds changes between the current manifest and m2.'''
        # XXX think whether efficiency matters here
        diff = {}

        for fn, e1, flags in self.iterentries():
            if fn not in m2:
                diff[fn] = (e1, flags), (None, b'')
            else:
                e2 = m2[fn]
                if (e1, flags) != e2:
                    diff[fn] = (e1, flags), e2
                elif clean:
                    diff[fn] = None

        for fn, e2, flags in m2.iterentries():
            if fn not in self:
                diff[fn] = (None, b''), (e2, flags)

        return diff

    def iterentries(self):
        return lazymanifestiterentries(self)

    def iterkeys(self):
        return lazymanifestiter(self)

    def __iter__(self):
        return lazymanifestiter(self)

    def __len__(self):
        return len(self.positions)

    def filtercopy(self, filterfn):
        # XXX should be optimized
        c = _lazymanifest(b'')
        for f, n, fl in self.iterentries():
            if filterfn(f):
                c[f] = n, fl
        return c


try:
    _lazymanifest = parsers.lazymanifest
except AttributeError:
    pass


@interfaceutil.implementer(repository.imanifestdict)
class manifestdict(object):
    def __init__(self, data=b''):
        self._lm = _lazymanifest(data)

    def __getitem__(self, key):
        return self._lm[key][0]

    def find(self, key):
        return self._lm[key]

    def __len__(self):
        return len(self._lm)

    def __nonzero__(self):
        # nonzero is covered by the __len__ function, but implementing it here
        # makes it easier for extensions to override.
        return len(self._lm) != 0

    __bool__ = __nonzero__

    def __setitem__(self, key, node):
        self._lm[key] = node, self.flags(key)

    def __contains__(self, key):
        if key is None:
            return False
        return key in self._lm

    def __delitem__(self, key):
        del self._lm[key]

    def __iter__(self):
        return self._lm.__iter__()

    def iterkeys(self):
        return self._lm.iterkeys()

    def keys(self):
        return list(self.iterkeys())

    def filesnotin(self, m2, match=None):
        '''Set of files in this manifest that are not in the other'''
        if match is not None:
            match = matchmod.badmatch(match, lambda path, msg: None)
            sm2 = set(m2.walk(match))
            return {f for f in self.walk(match) if f not in sm2}
        return {f for f in self if f not in m2}

    @propertycache
    def _dirs(self):
        return pathutil.dirs(self)

    def dirs(self):
        return self._dirs

    def hasdir(self, dir):
        return dir in self._dirs

    def _filesfastpath(self, match):
        '''Checks whether we can correctly and quickly iterate over matcher
        files instead of over manifest files.'''
        files = match.files()
        return len(files) < 100 and (
            match.isexact()
            or (match.prefix() and all(fn in self for fn in files))
        )

    def walk(self, match):
        '''Generates matching file names.

        Equivalent to manifest.matches(match).iterkeys(), but without creating
        an entirely new manifest.

        It also reports nonexistent files by marking them bad with match.bad().
        '''
        if match.always():
            for f in iter(self):
                yield f
            return

        fset = set(match.files())

        # avoid the entire walk if we're only looking for specific files
        if self._filesfastpath(match):
            for fn in sorted(fset):
                if fn in self:
                    yield fn
            return

        for fn in self:
            if fn in fset:
                # specified pattern is the exact name
                fset.remove(fn)
            if match(fn):
                yield fn

        # for dirstate.walk, files=[''] means "walk the whole tree".
        # follow that here, too
        fset.discard(b'')

        for fn in sorted(fset):
            if not self.hasdir(fn):
                match.bad(fn, None)

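    # Editor's illustration (not part of manifest.py): a hypothetical use of
    # walk() that takes the _filesfastpath() branch above. Assumes
    # matchmod.exact() accepts a list of files, as in Mercurial of this era:
    #
    #   >>> m = manifestdict()
    #   >>> m[b'a'] = b'\x11' * 20
    #   >>> m[b'b'] = b'\x22' * 20
    #   >>> list(m.walk(matchmod.exact([b'a'])))
    #   [b'a']
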
    def _matches(self, match):
        '''generate a new manifest filtered by the match argument'''
        if match.always():
            return self.copy()

        if self._filesfastpath(match):
            m = manifestdict()
            lm = self._lm
            for fn in match.files():
                if fn in lm:
                    m._lm[fn] = lm[fn]
            return m

        m = manifestdict()
        m._lm = self._lm.filtercopy(match)
        return m

    def diff(self, m2, match=None, clean=False):
        '''Finds changes between the current manifest and m2.

        Args:
          m2: the manifest to which this manifest should be compared.
          clean: if true, include files unchanged between these manifests
                 with a None value in the returned dictionary.

        The result is returned as a dict with filename as key and
        values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
        nodeid in the current/other manifest and fl1/fl2 is the flag
        in the current/other manifest. Where the file does not exist,
        the nodeid will be None and the flags will be the empty
        string.
        '''
        if match:
            m1 = self._matches(match)
            m2 = m2._matches(match)
            return m1.diff(m2, clean=clean)
        return self._lm.diff(m2._lm, clean)

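    # Editor's illustration (not part of manifest.py) of the diff() return
    # shape documented above; a sketch, assuming two small in-memory
    # manifests:
    #
    #   >>> m1 = manifestdict()
    #   >>> m1[b'a'] = b'\x11' * 20
    #   >>> m2 = m1.copy()
    #   >>> m2[b'a'] = b'\x22' * 20
    #   >>> m1.diff(m2)[b'a'] == ((b'\x11' * 20, b''), (b'\x22' * 20, b''))
    #   True
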
    def setflag(self, key, flag):
        self._lm[key] = self[key], flag

    def get(self, key, default=None):
        try:
            return self._lm[key][0]
        except KeyError:
            return default

    def flags(self, key):
        try:
            return self._lm[key][1]
        except KeyError:
            return b''

    def copy(self):
        c = manifestdict()
        c._lm = self._lm.copy()
        return c

    def items(self):
        return (x[:2] for x in self._lm.iterentries())

    def iteritems(self):
        return (x[:2] for x in self._lm.iterentries())

    def iterentries(self):
        return self._lm.iterentries()

    def text(self):
        # most likely uses native version
        return self._lm.text()

    def fastdelta(self, base, changes):
        """Given a base manifest text as a bytearray and a list of changes
        relative to that text, compute a delta that can be used by revlog.
        """
        delta = []
        dstart = None
        dend = None
        dline = [b""]
        start = 0
        # zero copy representation of base as a buffer
        addbuf = util.buffer(base)

        changes = list(changes)
        if len(changes) < FASTDELTA_TEXTDIFF_THRESHOLD:
            # start with a readonly loop that finds the offset of
            # each line and creates the deltas
            for f, todelete in changes:
                # bs will either be the index of the item or the insert point
                start, end = _msearch(addbuf, f, start)
                if not todelete:
                    h, fl = self._lm[f]
                    l = b"%s\0%s%s\n" % (f, hex(h), fl)
                else:
                    if start == end:
                        # item we want to delete was not found, error out
                        raise AssertionError(
                            _(b"failed to remove %s from manifest") % f
                        )
                    l = b""
                if dstart is not None and dstart <= start and dend >= start:
                    if dend < end:
                        dend = end
                    if l:
                        dline.append(l)
                else:
                    if dstart is not None:
                        delta.append([dstart, dend, b"".join(dline)])
                    dstart = start
                    dend = end
                    dline = [l]

            if dstart is not None:
                delta.append([dstart, dend, b"".join(dline)])
            # apply the delta to the base, and get a delta for addrevision
            deltatext, arraytext = _addlistdelta(base, delta)
        else:
            # For large changes, it's much cheaper to just build the text and
            # diff it.
            arraytext = bytearray(self.text())
            deltatext = mdiff.textdiff(
                util.buffer(base), util.buffer(arraytext)
            )

        return arraytext, deltatext


def _msearch(m, s, lo=0, hi=None):
    '''return a tuple (start, end) that says where to find s within m.

    If the string is found, m[start:end] is the line containing
    that string.  If start == end the string was not found and
    they indicate the proper sorted insertion point.

    m should be a buffer, a memoryview or a byte string.
    s is a byte string'''

    def advance(i, c):
        while i < lenm and m[i : i + 1] != c:
            i += 1
        return i

    if not s:
        return (lo, lo)
    lenm = len(m)
    if not hi:
        hi = lenm
    while lo < hi:
        mid = (lo + hi) // 2
        start = mid
        while start > 0 and m[start - 1 : start] != b'\n':
            start -= 1
        end = advance(start, b'\0')
        if bytes(m[start:end]) < s:
            # we know that after the null there are 40 bytes of sha1
            # this translates to the bisect lo = mid + 1
            lo = advance(end + 40, b'\n') + 1
        else:
            # this translates to the bisect hi = mid
            hi = start
    end = advance(lo, b'\0')
    found = m[lo:end]
    if s == found:
        # we know that after the null there are 40 bytes of sha1
        end = advance(end + 40, b'\n')
        return (lo, end + 1)
    else:
        return (lo, lo)


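# Editor's illustration (not part of manifest.py): _msearch() bisecting the
# sorted manifest text. Each line is "<path>\0<40 hex digits>\n", so the
# entry for b'a' below spans bytes [0, 43); a miss returns start == end,
# the sorted insertion point:
#
#   >>> data = b'a\x00' + b'11' * 20 + b'\n' + b'b\x00' + b'22' * 20 + b'\n'
#   >>> _msearch(data, b'a')
#   (0, 43)
#   >>> _msearch(data, b'c')
#   (86, 86)

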
def _checkforbidden(l):
    """Check filenames for illegal characters."""
    for f in l:
        if b'\n' in f or b'\r' in f:
            raise error.StorageError(
                _(b"'\\n' and '\\r' disallowed in filenames: %r")
                % pycompat.bytestr(f)
            )


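# Editor's illustration (not part of manifest.py):
#
#   >>> _checkforbidden([b'ok.txt'])       # fine, returns None
#   >>> _checkforbidden([b'bad\nname'])    # raises error.StorageError

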
# apply the changes collected during the bisect loop to our addlist
# return a delta suitable for addrevision
def _addlistdelta(addlist, x):
    # for large addlist arrays, building a new array is cheaper
    # than repeatedly modifying the existing one
    currentposition = 0
    newaddlist = bytearray()

    for start, end, content in x:
        newaddlist += addlist[currentposition:start]
        if content:
            newaddlist += bytearray(content)

        currentposition = end

    newaddlist += addlist[currentposition:]

    deltatext = b"".join(
        struct.pack(b">lll", start, end, len(content)) + content
        for start, end, content in x
    )
    return deltatext, newaddlist


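# Editor's illustration (not part of manifest.py) of the delta encoding
# built above: each chunk is a big-endian (start, end, length) header
# followed by the replacement bytes, meaning "replace base[start:end] with
# these <length> bytes":
#
#   >>> content = b'a\x00' + b'11' * 20 + b'\n'
#   >>> chunk = struct.pack(b'>lll', 0, 0, len(content)) + content
#   >>> len(chunk) == 12 + 43
#   True

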
def _splittopdir(f):
    if b'/' in f:
        dir, subpath = f.split(b'/', 1)
        return dir + b'/', subpath
    else:
        return b'', f


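# Editor's illustration (not part of manifest.py):
#
#   >>> _splittopdir(b'dir/subdir/file.py')
#   (b'dir/', b'subdir/file.py')
#   >>> _splittopdir(b'file.py')
#   (b'', b'file.py')

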
_noop = lambda s: None


@interfaceutil.implementer(repository.imanifestdict)
class treemanifest(object):
    def __init__(self, dir=b'', text=b''):
        self._dir = dir
        self._node = nullid
        self._loadfunc = _noop
        self._copyfunc = _noop
        self._dirty = False
        self._dirs = {}
        self._lazydirs = {}
        # Using _lazymanifest here is a little slower than plain old dicts
        self._files = {}
        self._flags = {}
        if text:

            def readsubtree(subdir, subm):
                raise AssertionError(
                    b'treemanifest constructor only accepts flat manifests'
                )

            self.parse(text, readsubtree)
            self._dirty = True  # Mark flat manifest dirty after parsing

    def _subpath(self, path):
        return self._dir + path

    def _loadalllazy(self):
        selfdirs = self._dirs
        for d, (path, node, readsubtree, docopy) in pycompat.iteritems(
            self._lazydirs
        ):
            if docopy:
                selfdirs[d] = readsubtree(path, node).copy()
            else:
                selfdirs[d] = readsubtree(path, node)
        self._lazydirs = {}

    def _loadlazy(self, d):
        v = self._lazydirs.get(d)
        if v:
            path, node, readsubtree, docopy = v
            if docopy:
                self._dirs[d] = readsubtree(path, node).copy()
            else:
                self._dirs[d] = readsubtree(path, node)
            del self._lazydirs[d]

    def _loadchildrensetlazy(self, visit):
        if not visit:
            return None
        if visit == b'all' or visit == b'this':
            self._loadalllazy()
            return None

        loadlazy = self._loadlazy
        for k in visit:
            loadlazy(k + b'/')
        return visit

    def _loaddifflazy(self, t1, t2):
        """load items in t1 and t2 if they're needed for diffing.

        The current criteria are:
        - if it's not present in _lazydirs in either t1 or t2, load it in the
          other (it may already be loaded or it may not exist, doesn't matter)
        - if it's present in _lazydirs in both, compare the nodeid; if it
          differs, load it in both
        """
        toloadlazy = []
        for d, v1 in pycompat.iteritems(t1._lazydirs):
            v2 = t2._lazydirs.get(d)
            if not v2 or v2[1] != v1[1]:
                toloadlazy.append(d)
        for d, v1 in pycompat.iteritems(t2._lazydirs):
            if d not in t1._lazydirs:
                toloadlazy.append(d)

        for d in toloadlazy:
            t1._loadlazy(d)
            t2._loadlazy(d)

    def __len__(self):
        self._load()
        size = len(self._files)
        self._loadalllazy()
        for m in self._dirs.values():
            size += m.__len__()
        return size

    def __nonzero__(self):
        # Faster than "__len__() != 0" since it avoids loading sub-manifests
        return not self._isempty()

    __bool__ = __nonzero__

    def _isempty(self):
        self._load()  # for consistency; already loaded by all callers
        # See if we can skip loading everything.
        if self._files or (
            self._dirs and any(not m._isempty() for m in self._dirs.values())
        ):
            return False
        self._loadalllazy()
        return not self._dirs or all(m._isempty() for m in self._dirs.values())

    @encoding.strmethod
    def __repr__(self):
        return (
            b'<treemanifest dir=%s, node=%s, loaded=%r, dirty=%r at 0x%x>'
            % (
                self._dir,
                hex(self._node),
                bool(self._loadfunc is _noop),
                self._dirty,
                id(self),
            )
        )

    def dir(self):
        '''The directory that this tree manifest represents, including a
        trailing '/'. Empty string for the repo root directory.'''
        return self._dir

    def node(self):
        '''The node of this instance. nullid for unsaved instances. Should
        be updated when the instance is read from or written to a revlog.
        '''
        assert not self._dirty
        return self._node

    def setnode(self, node):
        self._node = node
        self._dirty = False

    def iterentries(self):
        self._load()
        self._loadalllazy()
        for p, n in sorted(
            itertools.chain(self._dirs.items(), self._files.items())
        ):
            if p in self._files:
                yield self._subpath(p), n, self._flags.get(p, b'')
            else:
                for x in n.iterentries():
                    yield x

    def items(self):
        self._load()
        self._loadalllazy()
        for p, n in sorted(
            itertools.chain(self._dirs.items(), self._files.items())
        ):
            if p in self._files:
                yield self._subpath(p), n
            else:
                for f, sn in pycompat.iteritems(n):
                    yield f, sn

    iteritems = items

    def iterkeys(self):
        self._load()
        self._loadalllazy()
        for p in sorted(itertools.chain(self._dirs, self._files)):
            if p in self._files:
                yield self._subpath(p)
            else:
                for f in self._dirs[p]:
                    yield f

    def keys(self):
        return list(self.iterkeys())

    def __iter__(self):
        return self.iterkeys()

    def __contains__(self, f):
        if f is None:
            return False
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._loadlazy(dir)

            if dir not in self._dirs:
                return False

            return self._dirs[dir].__contains__(subpath)
        else:
            return f in self._files

    def get(self, f, default=None):
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._loadlazy(dir)

            if dir not in self._dirs:
                return default
            return self._dirs[dir].get(subpath, default)
        else:
            return self._files.get(f, default)

    def __getitem__(self, f):
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._loadlazy(dir)

            return self._dirs[dir].__getitem__(subpath)
        else:
            return self._files[f]

    def flags(self, f):
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._loadlazy(dir)

            if dir not in self._dirs:
                return b''
            return self._dirs[dir].flags(subpath)
        else:
            if f in self._lazydirs or f in self._dirs:
                return b''
            return self._flags.get(f, b'')

    def find(self, f):
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._loadlazy(dir)

            return self._dirs[dir].find(subpath)
        else:
            return self._files[f], self._flags.get(f, b'')

    def __delitem__(self, f):
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._loadlazy(dir)

            self._dirs[dir].__delitem__(subpath)
            # If the directory is now empty, remove it
            if self._dirs[dir]._isempty():
                del self._dirs[dir]
        else:
            del self._files[f]
            if f in self._flags:
                del self._flags[f]
        self._dirty = True

    def __setitem__(self, f, n):
        assert n is not None
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._loadlazy(dir)
            if dir not in self._dirs:
                self._dirs[dir] = treemanifest(self._subpath(dir))
            self._dirs[dir].__setitem__(subpath, n)
        else:
            # manifest nodes are either 20 bytes or 32 bytes,
            # depending on the hash in use. An extra byte is
            # occasionally used by hg, but won't ever be
            # persisted. Trim to 21 or 33 bytes as appropriate.
            trim = 21 if len(n) < 25 else 33
            self._files[f] = n[:trim]  # to match manifestdict's behavior
        self._dirty = True

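    # Editor's illustration (not part of manifest.py) of the trimming in
    # __setitem__ above: values shorter than 25 bytes are treated as sha1
    # nodes (plus at most one scratch byte) and trimmed to 21 bytes, longer
    # ones as 32-byte hashes trimmed to 33:
    #
    #   >>> n = b'\x11' * 22
    #   >>> trim = 21 if len(n) < 25 else 33
    #   >>> len(n[:trim])
    #   21
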
    def _load(self):
        if self._loadfunc is not _noop:
            lf, self._loadfunc = self._loadfunc, _noop
            lf(self)
        elif self._copyfunc is not _noop:
            cf, self._copyfunc = self._copyfunc, _noop
            cf(self)

    def setflag(self, f, flags):
        """Set the flags (symlink, executable) for path f."""
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._loadlazy(dir)
            if dir not in self._dirs:
                self._dirs[dir] = treemanifest(self._subpath(dir))
            self._dirs[dir].setflag(subpath, flags)
        else:
            self._flags[f] = flags
        self._dirty = True

    def copy(self):
        copy = treemanifest(self._dir)
        copy._node = self._node
        copy._dirty = self._dirty
        if self._copyfunc is _noop:

            def _copyfunc(s):
                self._load()
                s._lazydirs = {
                    d: (p, n, r, True)
                    for d, (p, n, r, c) in pycompat.iteritems(self._lazydirs)
                }
                sdirs = s._dirs
                for d, v in pycompat.iteritems(self._dirs):
                    sdirs[d] = v.copy()
                s._files = dict.copy(self._files)
                s._flags = dict.copy(self._flags)

            if self._loadfunc is _noop:
                _copyfunc(copy)
            else:
                copy._copyfunc = _copyfunc
        else:
            copy._copyfunc = self._copyfunc
        return copy

    def filesnotin(self, m2, match=None):
        '''Set of files in this manifest that are not in the other'''
        if match and not match.always():
            m1 = self._matches(match)
            m2 = m2._matches(match)
            return m1.filesnotin(m2)

        files = set()

        def _filesnotin(t1, t2):
            if t1._node == t2._node and not t1._dirty and not t2._dirty:
                return
            t1._load()
            t2._load()
            self._loaddifflazy(t1, t2)
            for d, m1 in pycompat.iteritems(t1._dirs):
                if d in t2._dirs:
                    m2 = t2._dirs[d]
                    _filesnotin(m1, m2)
                else:
                    files.update(m1.iterkeys())

            for fn in t1._files:
                if fn not in t2._files:
                    files.add(t1._subpath(fn))

        _filesnotin(self, m2)
        return files

    @propertycache
    def _alldirs(self):
        return pathutil.dirs(self)

    def dirs(self):
        return self._alldirs

    def hasdir(self, dir):
        self._load()
        topdir, subdir = _splittopdir(dir)
        if topdir:
            self._loadlazy(topdir)
            if topdir in self._dirs:
                return self._dirs[topdir].hasdir(subdir)
            return False
        dirslash = dir + b'/'
        return dirslash in self._dirs or dirslash in self._lazydirs

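    # Editor's illustration (not part of manifest.py):
    #
    #   >>> t = treemanifest()
    #   >>> t[b'dir/file'] = b'\x11' * 20
    #   >>> t.hasdir(b'dir'), t.hasdir(b'other')
    #   (True, False)
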
    def walk(self, match):
        '''Generates matching file names.

        It also reports nonexistent files by marking them bad with match.bad().
        '''
        if match.always():
            for f in iter(self):
                yield f
            return

        fset = set(match.files())

        for fn in self._walk(match):
            if fn in fset:
                # specified pattern is the exact name
                fset.remove(fn)
            yield fn

        # for dirstate.walk, files=[''] means "walk the whole tree".
        # follow that here, too
        fset.discard(b'')

        for fn in sorted(fset):
            if not self.hasdir(fn):
                match.bad(fn, None)

    def _walk(self, match):
        '''Recursively generates matching file names for walk().'''
        visit = match.visitchildrenset(self._dir[:-1])
        if not visit:
            return

        # yield this dir's files and walk its submanifests
        self._load()
        visit = self._loadchildrensetlazy(visit)
        for p in sorted(list(self._dirs) + list(self._files)):
            if p in self._files:
                fullp = self._subpath(p)
                if match(fullp):
                    yield fullp
            else:
                if not visit or p[:-1] in visit:
                    for f in self._dirs[p]._walk(match):
                        yield f

    def _matches(self, match):
        '''recursively generate a new manifest filtered by the match argument.
        '''
        if match.always():
            return self.copy()
        return self._matches_inner(match)

    def _matches_inner(self, match):
        if match.always():
            return self.copy()

        visit = match.visitchildrenset(self._dir[:-1])
        if visit == b'all':
            return self.copy()
        ret = treemanifest(self._dir)
        if not visit:
            return ret

        self._load()
        for fn in self._files:
            # While visitchildrenset *usually* lists only subdirs, this is
            # actually up to the matcher and may have some files in the set().
            # If visit == 'this', we should obviously look at the files in this
            # directory; if visit is a set, and fn is in it, we should inspect
            # fn (but no need to inspect things not in the set).
            if visit != b'this' and fn not in visit:
                continue
            fullp = self._subpath(fn)
            # visitchildrenset isn't perfect, we still need to call the regular
            # matcher code to further filter results.
            if not match(fullp):
                continue
            ret._files[fn] = self._files[fn]
            if fn in self._flags:
                ret._flags[fn] = self._flags[fn]

        visit = self._loadchildrensetlazy(visit)
        for dir, subm in pycompat.iteritems(self._dirs):
            if visit and dir[:-1] not in visit:
                continue
            m = subm._matches_inner(match)
            if not m._isempty():
                ret._dirs[dir] = m

        if not ret._isempty():
            ret._dirty = True
        return ret

    def fastdelta(self, base, changes):
        raise FastdeltaUnavailable()

    def diff(self, m2, match=None, clean=False):
        '''Finds changes between the current manifest and m2.

        Args:
          m2: the manifest to which this manifest should be compared.
          clean: if true, include files unchanged between these manifests
                 with a None value in the returned dictionary.

        The result is returned as a dict with filename as key and
        values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
        nodeid in the current/other manifest and fl1/fl2 is the flag
        in the current/other manifest. Where the file does not exist,
        the nodeid will be None and the flags will be the empty
        string.
        '''
        if match and not match.always():
            m1 = self._matches(match)
            m2 = m2._matches(match)
            return m1.diff(m2, clean=clean)
        result = {}
        emptytree = treemanifest()

        def _iterativediff(t1, t2, stack):
            """compares two tree manifests and appends any sub-manifests
            that still need to be compared to the stack"""
            if t1._node == t2._node and not t1._dirty and not t2._dirty:
                return
            t1._load()
            t2._load()
            self._loaddifflazy(t1, t2)

            for d, m1 in pycompat.iteritems(t1._dirs):
                m2 = t2._dirs.get(d, emptytree)
                stack.append((m1, m2))

            for d, m2 in pycompat.iteritems(t2._dirs):
                if d not in t1._dirs:
                    stack.append((emptytree, m2))

            for fn, n1 in pycompat.iteritems(t1._files):
                fl1 = t1._flags.get(fn, b'')
                n2 = t2._files.get(fn, None)
                fl2 = t2._flags.get(fn, b'')
                if n1 != n2 or fl1 != fl2:
                    result[t1._subpath(fn)] = ((n1, fl1), (n2, fl2))
                elif clean:
                    result[t1._subpath(fn)] = None

            for fn, n2 in pycompat.iteritems(t2._files):
                if fn not in t1._files:
                    fl2 = t2._flags.get(fn, b'')
                    result[t2._subpath(fn)] = ((None, b''), (n2, fl2))

        stackls = []
        _iterativediff(self, m2, stackls)
        while stackls:
            t1, t2 = stackls.pop()
            # stackls is populated in the function call
            _iterativediff(t1, t2, stackls)
        return result

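    # Editor's illustration (not part of manifest.py): the iterative diff
    # above reports a file present on only one side with a (None, b'')
    # counterpart, as the docstring describes; a sketch with two small
    # in-memory trees:
    #
    #   >>> t1, t2 = treemanifest(), treemanifest()
    #   >>> t1[b'd/f'] = b'\x11' * 20
    #   >>> t1.diff(t2) == {b'd/f': ((b'\x11' * 20, b''), (None, b''))}
    #   True
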
    def unmodifiedsince(self, m2):
        return not self._dirty and not m2._dirty and self._node == m2._node

    def parse(self, text, readsubtree):
        selflazy = self._lazydirs
        subpath = self._subpath
        for f, n, fl in _parse(text):
            if fl == b't':
                f = f + b'/'
                # False below means "doesn't need to be copied" and can use the
                # cached value from readsubtree directly.
                selflazy[f] = (subpath(f), n, readsubtree, False)
            elif b'/' in f:
                # This is a flat manifest, so use __setitem__ and setflag rather
                # than assigning directly to _files and _flags, so we can
                # assign a path in a subdirectory, and to mark dirty (compared
                # to nullid).
                self[f] = n
                if fl:
                    self.setflag(f, fl)
            else:
                # Assigning to _files and _flags avoids marking as dirty,
                # and should be a little faster.
                self._files[f] = n
                if fl:
                    self._flags[f] = fl

    def text(self):
        """Get the full data of this manifest as a bytestring."""
        self._load()
        return _text(self.iterentries())

    def dirtext(self):
        """Get the full data of this directory as a bytestring. Make sure that
        any submanifests have been written first, so their nodeids are correct.
        """
        self._load()
        flags = self.flags
        lazydirs = [
            (d[:-1], v[1], b't') for d, v in pycompat.iteritems(self._lazydirs)
        ]
        dirs = [(d[:-1], self._dirs[d]._node, b't') for d in self._dirs]
        files = [(f, self._files[f], flags(f)) for f in self._files]
        return _text(sorted(dirs + files + lazydirs))

    def read(self, gettext, readsubtree):
        def _load_for_read(s):
            s.parse(gettext(), readsubtree)
            s._dirty = False

        self._loadfunc = _load_for_read

    def writesubtrees(self, m1, m2, writesubtree, match):
        self._load()  # for consistency; should never have any effect here
        m1._load()
        m2._load()
        emptytree = treemanifest()

        def getnode(m, d):
            ld = m._lazydirs.get(d)
            if ld:
                return ld[1]
            return m._dirs.get(d, emptytree)._node

        # let's skip investigating things that `match` says we do not need.
        visit = match.visitchildrenset(self._dir[:-1])
        visit = self._loadchildrensetlazy(visit)
        if visit == b'this' or visit == b'all':
            visit = None
        for d, subm in pycompat.iteritems(self._dirs):
            if visit and d[:-1] not in visit:
                continue
            subp1 = getnode(m1, d)
            subp2 = getnode(m2, d)
            if subp1 == nullid:
                subp1, subp2 = subp2, subp1
            writesubtree(subm, subp1, subp2, match)

    def walksubtrees(self, matcher=None):
        """Returns an iterator of the subtrees of this manifest, including this
        manifest itself.

        If `matcher` is provided, it only returns subtrees that match.
        """
        if matcher and not matcher.visitdir(self._dir[:-1]):
            return
        if not matcher or matcher(self._dir[:-1]):
            yield self

        self._load()
        # OPT: use visitchildrenset to avoid loading everything.
        self._loadalllazy()
        for d, subm in pycompat.iteritems(self._dirs):
            for subtree in subm.walksubtrees(matcher=matcher):
                yield subtree


class manifestfulltextcache(util.lrucachedict):
    """File-backed LRU cache for the manifest cache

    File consists of entries, up to EOF:

    - 20 bytes node, 4 bytes length, <length> manifest data

    These are written in reverse cache order (oldest to newest).

    """

    _file = b'manifestfulltextcache'

    def __init__(self, max):
        super(manifestfulltextcache, self).__init__(max)
        self._dirty = False
        self._read = False
        self._opener = None

    def read(self):
        if self._read or self._opener is None:
            return

        try:
            with self._opener(self._file) as fp:
                set = super(manifestfulltextcache, self).__setitem__
                # ignore trailing data, this is a cache, corruption is skipped
                while True:
                    # TODO do we need to do work here for sha1 portability?
                    node = fp.read(20)
                    if len(node) < 20:
                        break
                    try:
                        size = struct.unpack(b'>L', fp.read(4))[0]
                    except struct.error:
                        break
                    value = bytearray(fp.read(size))
                    if len(value) != size:
                        break
                    set(node, value)
        except IOError:
            # the file is allowed to be missing
            pass

        self._read = True
        self._dirty = False

    def write(self):
        if not self._dirty or self._opener is None:
            return
        # rotate backwards to the first used node
        with self._opener(
            self._file, b'w', atomictemp=True, checkambig=True
        ) as fp:
            node = self._head.prev
            while True:
                if node.key in self._cache:
                    fp.write(node.key)
                    fp.write(struct.pack(b'>L', len(node.value)))
                    fp.write(node.value)
                if node is self._head:
                    break
                node = node.prev

1471 def __len__(self):
1471 def __len__(self):
1472 if not self._read:
1472 if not self._read:
1473 self.read()
1473 self.read()
1474 return super(manifestfulltextcache, self).__len__()
1474 return super(manifestfulltextcache, self).__len__()
1475
1475
1476 def __contains__(self, k):
1476 def __contains__(self, k):
1477 if not self._read:
1477 if not self._read:
1478 self.read()
1478 self.read()
1479 return super(manifestfulltextcache, self).__contains__(k)
1479 return super(manifestfulltextcache, self).__contains__(k)
1480
1480
1481 def __iter__(self):
1481 def __iter__(self):
1482 if not self._read:
1482 if not self._read:
1483 self.read()
1483 self.read()
1484 return super(manifestfulltextcache, self).__iter__()
1484 return super(manifestfulltextcache, self).__iter__()
1485
1485
1486 def __getitem__(self, k):
1486 def __getitem__(self, k):
1487 if not self._read:
1487 if not self._read:
1488 self.read()
1488 self.read()
1489 # the cache lru order can change on read
1489 # the cache lru order can change on read
1490 setdirty = self._cache.get(k) is not self._head
1490 setdirty = self._cache.get(k) is not self._head
1491 value = super(manifestfulltextcache, self).__getitem__(k)
1491 value = super(manifestfulltextcache, self).__getitem__(k)
1492 if setdirty:
1492 if setdirty:
1493 self._dirty = True
1493 self._dirty = True
1494 return value
1494 return value
1495
1495
1496 def __setitem__(self, k, v):
1496 def __setitem__(self, k, v):
1497 if not self._read:
1497 if not self._read:
1498 self.read()
1498 self.read()
1499 super(manifestfulltextcache, self).__setitem__(k, v)
1499 super(manifestfulltextcache, self).__setitem__(k, v)
1500 self._dirty = True
1500 self._dirty = True
1501
1501
1502 def __delitem__(self, k):
1502 def __delitem__(self, k):
1503 if not self._read:
1503 if not self._read:
1504 self.read()
1504 self.read()
1505 super(manifestfulltextcache, self).__delitem__(k)
1505 super(manifestfulltextcache, self).__delitem__(k)
1506 self._dirty = True
1506 self._dirty = True
1507
1507
1508 def get(self, k, default=None):
1508 def get(self, k, default=None):
1509 if not self._read:
1509 if not self._read:
1510 self.read()
1510 self.read()
1511 return super(manifestfulltextcache, self).get(k, default=default)
1511 return super(manifestfulltextcache, self).get(k, default=default)
1512
1512
1513 def clear(self, clear_persisted_data=False):
1513 def clear(self, clear_persisted_data=False):
1514 super(manifestfulltextcache, self).clear()
1514 super(manifestfulltextcache, self).clear()
1515 if clear_persisted_data:
1515 if clear_persisted_data:
1516 self._dirty = True
1516 self._dirty = True
1517 self.write()
1517 self.write()
1518 self._read = False
1518 self._read = False
1519
1519
1520
1520
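# Illustrative sketch only, not part of this module: a standalone
# encoder/decoder for the record format documented on
# manifestfulltextcache above (20-byte node, 4-byte big-endian length,
# then the manifest data). Function names are hypothetical; `struct` is
# the module already used by read() and write() above.
def _pack_cache_entry(node, data):
    """Serialize one cache entry the way write() does."""
    assert len(node) == 20
    return node + struct.pack(b'>L', len(data)) + data


def _unpack_cache_entries(blob):
    """Yield (node, data) pairs, stopping at truncation like read() does."""
    offset = 0
    while offset + 24 <= len(blob):
        node = blob[offset : offset + 20]
        (size,) = struct.unpack(b'>L', blob[offset + 20 : offset + 24])
        data = blob[offset + 24 : offset + 24 + size]
        if len(data) != size:
            # trailing entry is truncated: ignore it, as read() would
            break
        yield node, data
        offset += 24 + size

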
# an upper bound of what we expect from compression
# (real-life value seems to be "3")
MAXCOMPRESSION = 3


class FastdeltaUnavailable(Exception):
    """Exception raised when fastdelta isn't usable on a manifest."""


@interfaceutil.implementer(repository.imanifeststorage)
class manifestrevlog(object):
    '''A revlog that stores manifest texts. This is responsible for caching the
    full-text manifest contents.
    '''

    def __init__(
        self,
        opener,
        tree=b'',
        dirlogcache=None,
        indexfile=None,
        treemanifest=False,
    ):
        """Constructs a new manifest revlog

        `indexfile` - used by extensions to have two manifests at once, like
        when transitioning between flat manifests and treemanifests.

        `treemanifest` - used to indicate this is a tree manifest revlog. Opener
        options can also be used to make this a tree manifest revlog. The opener
        option takes precedence, so if it is set to True, we ignore whatever
        value is passed in to the constructor.
        """
        # During normal operations, we expect to deal with not more than four
        # revs at a time (such as during commit --amend). When rebasing large
        # stacks of commits, the number can go up, hence the config knob below.
        cachesize = 4
        optiontreemanifest = False
        opts = getattr(opener, 'options', None)
        if opts is not None:
            cachesize = opts.get(b'manifestcachesize', cachesize)
            optiontreemanifest = opts.get(b'treemanifest', False)

        self._treeondisk = optiontreemanifest or treemanifest

        self._fulltextcache = manifestfulltextcache(cachesize)

        if tree:
            assert self._treeondisk, b'opts is %r' % opts

        if indexfile is None:
            indexfile = b'00manifest.i'
            if tree:
                indexfile = b"meta/" + tree + indexfile

        self.tree = tree

        # The dirlogcache is kept on the root manifest log
        if tree:
            self._dirlogcache = dirlogcache
        else:
            self._dirlogcache = {b'': self}

        self._revlog = revlog.revlog(
            opener,
            indexfile,
            # only root indexfile is cached
            checkambig=not bool(tree),
            mmaplargeindex=True,
            upperboundcomp=MAXCOMPRESSION,
            persistentnodemap=opener.options.get(
                b'exp-persistent-nodemap', False
            ),
        )

        self.index = self._revlog.index
        self.version = self._revlog.version
        self._generaldelta = self._revlog._generaldelta

    def _setupmanifestcachehooks(self, repo):
        """Persist the manifestfulltextcache on lock release"""
        if not util.safehasattr(repo, b'_wlockref'):
            return

        self._fulltextcache._opener = repo.wcachevfs
        if repo._currentlock(repo._wlockref) is None:
            return

        reporef = weakref.ref(repo)
        manifestrevlogref = weakref.ref(self)

        def persistmanifestcache(success):
            # Repo is in an unknown state, do not persist.
            if not success:
                return

            repo = reporef()
            self = manifestrevlogref()
            if repo is None or self is None:
                return
            if repo.manifestlog.getstorage(b'') is not self:
                # there's a different manifest in play now, abort
                return
            self._fulltextcache.write()

        repo._afterlock(persistmanifestcache)

    @property
    def fulltextcache(self):
        return self._fulltextcache

    def clearcaches(self, clear_persisted_data=False):
        self._revlog.clearcaches()
        self._fulltextcache.clear(clear_persisted_data=clear_persisted_data)
        self._dirlogcache = {self.tree: self}

    def dirlog(self, d):
        if d:
            assert self._treeondisk
        if d not in self._dirlogcache:
            mfrevlog = manifestrevlog(
                self.opener, d, self._dirlogcache, treemanifest=self._treeondisk
            )
            self._dirlogcache[d] = mfrevlog
        return self._dirlogcache[d]

    def add(
        self,
        m,
        transaction,
        link,
        p1,
        p2,
        added,
        removed,
        readtree=None,
        match=None,
    ):
        try:
            if p1 not in self.fulltextcache:
                raise FastdeltaUnavailable()
            # If our first parent is in the manifest cache, we can
            # compute a delta here using properties we know about the
            # manifest up-front, which may save time later for the
            # revlog layer.

            _checkforbidden(added)
            # combine the changed lists into one sorted iterator (a
            # standalone sketch of this step follows this class)
            work = heapq.merge(
                [(x, False) for x in sorted(added)],
                [(x, True) for x in sorted(removed)],
            )

            arraytext, deltatext = m.fastdelta(self.fulltextcache[p1], work)
            cachedelta = self._revlog.rev(p1), deltatext
            text = util.buffer(arraytext)
            n = self._revlog.addrevision(
                text, transaction, link, p1, p2, cachedelta
            )
        except FastdeltaUnavailable:
            # The first parent manifest isn't already loaded or the
            # manifest implementation doesn't support fastdelta, so
            # we'll just encode a fulltext of the manifest and pass
            # that through to the revlog layer, and let it handle the
            # delta process.
            if self._treeondisk:
                assert readtree, b"readtree must be set for treemanifest writes"
                assert match, b"match must be specified for treemanifest writes"
                m1 = readtree(self.tree, p1)
                m2 = readtree(self.tree, p2)
                n = self._addtree(
                    m, transaction, link, m1, m2, readtree, match=match
                )
                arraytext = None
            else:
                text = m.text()
                n = self._revlog.addrevision(text, transaction, link, p1, p2)
                arraytext = bytearray(text)

        if arraytext is not None:
            self.fulltextcache[n] = arraytext

        return n

    def _addtree(self, m, transaction, link, m1, m2, readtree, match):
        # If the manifest is unchanged compared to one parent,
        # don't write a new revision
        if self.tree != b'' and (
            m.unmodifiedsince(m1) or m.unmodifiedsince(m2)
        ):
            return m.node()

        def writesubtree(subm, subp1, subp2, match):
            sublog = self.dirlog(subm.dir())
            sublog.add(
                subm,
                transaction,
                link,
                subp1,
                subp2,
                None,
                None,
                readtree=readtree,
                match=match,
            )

        m.writesubtrees(m1, m2, writesubtree, match)
        text = m.dirtext()
        n = None
        if self.tree != b'':
            # Double-check whether contents are unchanged to one parent
            if text == m1.dirtext():
                n = m1.node()
            elif text == m2.dirtext():
                n = m2.node()

        if not n:
            n = self._revlog.addrevision(
                text, transaction, link, m1.node(), m2.node()
            )

        # Save nodeid so parent manifest can calculate its nodeid
        m.setnode(n)
        return n

    def __len__(self):
        return len(self._revlog)

    def __iter__(self):
        return self._revlog.__iter__()

    def rev(self, node):
        return self._revlog.rev(node)

    def node(self, rev):
        return self._revlog.node(rev)

    def lookup(self, value):
        return self._revlog.lookup(value)

    def parentrevs(self, rev):
        return self._revlog.parentrevs(rev)

    def parents(self, node):
        return self._revlog.parents(node)

    def linkrev(self, rev):
        return self._revlog.linkrev(rev)

    def checksize(self):
        return self._revlog.checksize()

    def revision(self, node, _df=None, raw=False):
        return self._revlog.revision(node, _df=_df, raw=raw)

    def rawdata(self, node, _df=None):
        return self._revlog.rawdata(node, _df=_df)

    def revdiff(self, rev1, rev2):
        return self._revlog.revdiff(rev1, rev2)

    def cmp(self, node, text):
        return self._revlog.cmp(node, text)

    def deltaparent(self, rev):
        return self._revlog.deltaparent(rev)

    def emitrevisions(
        self,
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
        deltamode=repository.CG_DELTAMODE_STD,
    ):
        return self._revlog.emitrevisions(
            nodes,
            nodesorder=nodesorder,
            revisiondata=revisiondata,
            assumehaveparentrevisions=assumehaveparentrevisions,
            deltamode=deltamode,
        )

    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
        return self._revlog.addgroup(
            deltas, linkmapper, transaction, addrevisioncb=addrevisioncb
        )

    def rawsize(self, rev):
        return self._revlog.rawsize(rev)

    def getstrippoint(self, minlink):
        return self._revlog.getstrippoint(minlink)

    def strip(self, minlink, transaction):
        return self._revlog.strip(minlink, transaction)

    def files(self):
        return self._revlog.files()

    def clone(self, tr, destrevlog, **kwargs):
        if not isinstance(destrevlog, manifestrevlog):
            raise error.ProgrammingError(b'expected manifestrevlog to clone()')

        return self._revlog.clone(tr, destrevlog._revlog, **kwargs)

    def storageinfo(
        self,
        exclusivefiles=False,
        sharedfiles=False,
        revisionscount=False,
        trackedsize=False,
        storedsize=False,
    ):
        return self._revlog.storageinfo(
            exclusivefiles=exclusivefiles,
            sharedfiles=sharedfiles,
            revisionscount=revisionscount,
            trackedsize=trackedsize,
            storedsize=storedsize,
        )

    @property
    def indexfile(self):
        return self._revlog.indexfile

    @indexfile.setter
    def indexfile(self, value):
        self._revlog.indexfile = value

    @property
    def opener(self):
        return self._revlog.opener

    @opener.setter
    def opener(self, value):
        self._revlog.opener = value


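# Illustrative sketch only, not part of this module: the "combine the
# changed lists into one sorted iterator" step from manifestrevlog.add()
# above, in isolation. heapq.merge() lazily interleaves two pre-sorted
# inputs, so fastdelta sees a single stream of (path, is_removal) pairs in
# path order. The function name and sample paths are made up.
def _merged_worklist_example():
    added = [b'b.txt', b'a.txt']
    removed = [b'c.txt']
    work = heapq.merge(
        [(x, False) for x in sorted(added)],
        [(x, True) for x in sorted(removed)],
    )
    # -> [(b'a.txt', False), (b'b.txt', False), (b'c.txt', True)]
    return list(work)

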
@interfaceutil.implementer(repository.imanifestlog)
class manifestlog(object):
    """A collection class representing the manifest snapshots referenced by
    commits in the repository.

    In this situation, 'manifest' refers to the abstract concept of a snapshot
    of the list of files in the given commit. Consumers of the output of this
    class do not care about the implementation details of the actual manifests
    they receive (i.e. tree or flat or lazily loaded, etc)."""

    def __init__(self, opener, repo, rootstore, narrowmatch):
        usetreemanifest = False
        cachesize = 4

        opts = getattr(opener, 'options', None)
        if opts is not None:
            usetreemanifest = opts.get(b'treemanifest', usetreemanifest)
            cachesize = opts.get(b'manifestcachesize', cachesize)

        self._treemanifests = usetreemanifest

        self._rootstore = rootstore
        self._rootstore._setupmanifestcachehooks(repo)
        self._narrowmatch = narrowmatch

        # A cache of the manifestctx or treemanifestctx for each directory
        self._dirmancache = {}
        self._dirmancache[b''] = util.lrucachedict(cachesize)

        self._cachesize = cachesize

    def __getitem__(self, node):
        """Retrieves the manifest instance for the given node. Throws a
        LookupError if not found.
        """
        return self.get(b'', node)

    def get(self, tree, node, verify=True):
        """Retrieves the manifest instance for the given node. Throws a
        LookupError if not found.

        `verify` - if True an exception will be thrown if the node is not in
        the revlog
        """
        if node in self._dirmancache.get(tree, ()):
            return self._dirmancache[tree][node]

        if not self._narrowmatch.always():
            if not self._narrowmatch.visitdir(tree[:-1]):
                return excludeddirmanifestctx(tree, node)
        if tree:
            if self._rootstore._treeondisk:
                if verify:
                    # Side-effect is LookupError is raised if node doesn't
                    # exist.
                    self.getstorage(tree).rev(node)

                m = treemanifestctx(self, tree, node)
            else:
                raise error.Abort(
                    _(
                        b"cannot ask for manifest directory '%s' in a flat "
                        b"manifest"
                    )
                    % tree
                )
        else:
            if verify:
                # Side-effect is LookupError is raised if node doesn't exist.
                self._rootstore.rev(node)

            if self._treemanifests:
                m = treemanifestctx(self, b'', node)
            else:
                m = manifestctx(self, node)

        if node != nullid:
            mancache = self._dirmancache.get(tree)
            if not mancache:
                mancache = util.lrucachedict(self._cachesize)
                self._dirmancache[tree] = mancache
            mancache[node] = m
        return m

    def getstorage(self, tree):
        return self._rootstore.dirlog(tree)

    def clearcaches(self, clear_persisted_data=False):
        self._dirmancache.clear()
        self._rootstore.clearcaches(clear_persisted_data=clear_persisted_data)

    def rev(self, node):
        return self._rootstore.rev(node)

    def update_caches(self, transaction):
        return self._rootstore._revlog.update_caches(transaction=transaction)


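# Illustrative sketch only, not part of this module: the typical read path
# through manifestlog. `repo` is assumed to be a localrepository whose
# manifestlog property returns an instance of the class above, and `node`
# a manifest node known to the root manifest store.
def _manifestlog_read_example(repo, node):
    mfl = repo.manifestlog
    # __getitem__ verifies the node exists and returns a manifestctx or
    # treemanifestctx, cached per directory in an LRU dict
    mctx = mfl[node]
    # read() parses and returns the manifest (manifestdict or treemanifest)
    return mctx.read()

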
@interfaceutil.implementer(repository.imanifestrevisionwritable)
class memmanifestctx(object):
    def __init__(self, manifestlog):
        self._manifestlog = manifestlog
        self._manifestdict = manifestdict()

    def _storage(self):
        return self._manifestlog.getstorage(b'')

    def copy(self):
        memmf = memmanifestctx(self._manifestlog)
        memmf._manifestdict = self.read().copy()
        return memmf

    def read(self):
        return self._manifestdict

    def write(self, transaction, link, p1, p2, added, removed, match=None):
        return self._storage().add(
            self._manifestdict,
            transaction,
            link,
            p1,
            p2,
            added,
            removed,
            match=match,
        )


@interfaceutil.implementer(repository.imanifestrevisionstored)
class manifestctx(object):
    """A class representing a single revision of a manifest, including its
    contents, its parent revs, and its linkrev.
    """

    def __init__(self, manifestlog, node):
        self._manifestlog = manifestlog
        self._data = None

        self._node = node

        # TODO: We eventually want p1, p2, and linkrev exposed on this class,
        # but let's add it later when something needs it and we can load it
        # lazily.
        # self.p1, self.p2 = store.parents(node)
        # rev = store.rev(node)
        # self.linkrev = store.linkrev(rev)

    def _storage(self):
        return self._manifestlog.getstorage(b'')

    def node(self):
        return self._node

    def copy(self):
        memmf = memmanifestctx(self._manifestlog)
        memmf._manifestdict = self.read().copy()
        return memmf

    @propertycache
    def parents(self):
        return self._storage().parents(self._node)

    def read(self):
        if self._data is None:
            if self._node == nullid:
                self._data = manifestdict()
            else:
                store = self._storage()
                if self._node in store.fulltextcache:
                    text = pycompat.bytestr(store.fulltextcache[self._node])
                else:
                    text = store.revision(self._node)
                    arraytext = bytearray(text)
                    store.fulltextcache[self._node] = arraytext
                self._data = manifestdict(text)
        return self._data

    def readfast(self, shallow=False):
        '''Calls either readdelta or read, based on which would be less work.
        readdelta is called if the delta is against the p1, and therefore can be
        read quickly.

        If `shallow` is True, nothing changes since this is a flat manifest.
        '''
        store = self._storage()
        r = store.rev(self._node)
        deltaparent = store.deltaparent(r)
        if deltaparent != nullrev and deltaparent in store.parentrevs(r):
            return self.readdelta()
        return self.read()

    def readdelta(self, shallow=False):
        '''Returns a manifest containing just the entries that are present
        in this manifest, but not in its p1 manifest. This is efficient to read
        if the revlog delta is already p1.

        Changing the value of `shallow` has no effect on flat manifests.
        '''
        store = self._storage()
        r = store.rev(self._node)
        d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
        return manifestdict(d)

    def find(self, key):
        return self.read().find(key)


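# Illustrative sketch only, not part of this module: the flat manifest
# text that manifestctx.read() above hands to manifestdict(). Each line is
# "<path>\x00<40 hex node chars><optional one-letter flag>". This
# simplified parser is for demonstration only; the real parsing happens in
# the lazymanifest/C layer.
def _parse_flat_manifest_example(text):
    entries = {}
    for line in text.splitlines():
        path, rest = line.split(b'\x00', 1)
        hexnode, flags = rest[:40], rest[40:]
        # flags is b'', b'l' (symlink), b'x' (executable) or b't' (tree)
        entries[path] = (hexnode, flags)
    return entries

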
@interfaceutil.implementer(repository.imanifestrevisionwritable)
class memtreemanifestctx(object):
    def __init__(self, manifestlog, dir=b''):
        self._manifestlog = manifestlog
        self._dir = dir
        self._treemanifest = treemanifest()

    def _storage(self):
        return self._manifestlog.getstorage(b'')

    def copy(self):
        memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
        memmf._treemanifest = self._treemanifest.copy()
        return memmf

    def read(self):
        return self._treemanifest

    def write(self, transaction, link, p1, p2, added, removed, match=None):
        def readtree(dir, node):
            return self._manifestlog.get(dir, node).read()

        return self._storage().add(
            self._treemanifest,
            transaction,
            link,
            p1,
            p2,
            added,
            removed,
            readtree=readtree,
            match=match,
        )


@interfaceutil.implementer(repository.imanifestrevisionstored)
class treemanifestctx(object):
    def __init__(self, manifestlog, dir, node):
        self._manifestlog = manifestlog
        self._dir = dir
        self._data = None

        self._node = node

        # TODO: Load p1/p2/linkrev lazily. They need to be lazily loaded so that
        # we can instantiate treemanifestctx objects for directories we don't
        # have on disk.
        # self.p1, self.p2 = store.parents(node)
        # rev = store.rev(node)
        # self.linkrev = store.linkrev(rev)

    def _storage(self):
        narrowmatch = self._manifestlog._narrowmatch
        if not narrowmatch.always():
            if not narrowmatch.visitdir(self._dir[:-1]):
                return excludedmanifestrevlog(self._dir)
        return self._manifestlog.getstorage(self._dir)

    def read(self):
        if self._data is None:
            store = self._storage()
            if self._node == nullid:
                self._data = treemanifest()
            # TODO accessing non-public API
            elif store._treeondisk:
                m = treemanifest(dir=self._dir)

                def gettext():
                    return store.revision(self._node)

                def readsubtree(dir, subm):
                    # Set verify to False since we need to be able to create
                    # subtrees for trees that don't exist on disk.
                    return self._manifestlog.get(dir, subm, verify=False).read()

                m.read(gettext, readsubtree)
                m.setnode(self._node)
                self._data = m
            else:
                if self._node in store.fulltextcache:
                    text = pycompat.bytestr(store.fulltextcache[self._node])
                else:
                    text = store.revision(self._node)
                    arraytext = bytearray(text)
                    store.fulltextcache[self._node] = arraytext
                self._data = treemanifest(dir=self._dir, text=text)

        return self._data

    def node(self):
        return self._node

    def copy(self):
        memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
        memmf._treemanifest = self.read().copy()
        return memmf

    @propertycache
    def parents(self):
        return self._storage().parents(self._node)

    def readdelta(self, shallow=False):
        '''Returns a manifest containing just the entries that are present
        in this manifest, but not in its p1 manifest. This is efficient to read
        if the revlog delta is already p1.

        If `shallow` is True, this will read the delta for this directory,
        without recursively reading subdirectory manifests. Instead, any
        subdirectory entry will be reported as it appears in the manifest, i.e.
        the subdirectory will be reported among files and distinguished only by
        its 't' flag.
        '''
        store = self._storage()
        if shallow:
            r = store.rev(self._node)
            d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
            return manifestdict(d)
        else:
            # Need to perform a slow delta
            r0 = store.deltaparent(store.rev(self._node))
            m0 = self._manifestlog.get(self._dir, store.node(r0)).read()
            m1 = self.read()
            md = treemanifest(dir=self._dir)
            for f, ((n0, fl0), (n1, fl1)) in pycompat.iteritems(m0.diff(m1)):
                if n1:
                    md[f] = n1
                    if fl1:
                        md.setflag(f, fl1)
            return md

    def readfast(self, shallow=False):
        '''Calls either readdelta or read, based on which would be less work.
        readdelta is called if the delta is against the p1, and therefore can be
        read quickly.

        If `shallow` is True, it only returns the entries from this manifest,
        and not any submanifests.
        '''
        store = self._storage()
        r = store.rev(self._node)
        deltaparent = store.deltaparent(r)
        if deltaparent != nullrev and deltaparent in store.parentrevs(r):
            return self.readdelta(shallow=shallow)

        if shallow:
            return manifestdict(store.revision(self._node))
        else:
            return self.read()

    def find(self, key):
        return self.read().find(key)


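# Illustrative sketch only, not part of this module: the shape of a
# *shallow* delta from treemanifestctx.readdelta(shallow=True) above.
# Changed subdirectories appear as single entries flagged 't' instead of
# being recursed into. The nodes below are made-up values.
def _shallow_delta_example():
    md = manifestdict()
    md[b'README'] = b'\x11' * 20  # a changed file in this directory
    md[b'subdir/'] = b'\x22' * 20  # a changed subtree: node of its dirlog
    md.setflag(b'subdir/', b't')  # the 't' flag marks a tree entry
    return md

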
class excludeddir(treemanifest):
    """Stand-in for a directory that is excluded from the repository.

    With narrowing active on a repository that uses treemanifests,
    some of the directory revlogs will be excluded from the resulting
    clone. This is a huge storage win for clients, but means we need
    some sort of pseudo-manifest to surface to internals so we can
    detect a merge conflict outside the narrowspec. That's what this
    class is: it stands in for a directory whose node is known, but
    whose contents are unknown.
    """

    def __init__(self, dir, node):
        super(excludeddir, self).__init__(dir)
        self._node = node
        # Add an empty file, which will be included by iterators and such,
        # appearing as the directory itself (i.e. something like "dir/")
        self._files[b''] = node
        self._flags[b''] = b't'

    # Manifests outside the narrowspec should never be modified, so avoid
    # copying. This makes a noticeable difference when there are very many
    # directories outside the narrowspec. Also, it makes sense for the copy to
    # be of the same type as the original, which would not happen with the
    # super type's copy().
    def copy(self):
        return self


class excludeddirmanifestctx(treemanifestctx):
    """context wrapper for excludeddir - see that docstring for rationale"""

    def __init__(self, dir, node):
        self._dir = dir
        self._node = node

    def read(self):
        return excludeddir(self._dir, self._node)

    def write(self, *args):
        raise error.ProgrammingError(
            b'attempt to write manifest from excluded dir %s' % self._dir
        )


class excludedmanifestrevlog(manifestrevlog):
    """Stand-in for excluded treemanifest revlogs.

    When narrowing is active on a treemanifest repository, we'll have
    references to directories we can't see due to the revlog being
    skipped. This class exists to conform to the manifestrevlog
    interface for those directories and proactively prevent writes to
    outside the narrowspec.
    """

    def __init__(self, dir):
        self._dir = dir

    def __len__(self):
        raise error.ProgrammingError(
            b'attempt to get length of excluded dir %s' % self._dir
        )

    def rev(self, node):
        raise error.ProgrammingError(
            b'attempt to get rev from excluded dir %s' % self._dir
        )

    def linkrev(self, node):
        raise error.ProgrammingError(
            b'attempt to get linkrev from excluded dir %s' % self._dir
        )

    def node(self, rev):
        raise error.ProgrammingError(
            b'attempt to get node from excluded dir %s' % self._dir
        )

    def add(self, *args, **kwargs):
        # We should never write entries in dirlogs outside the narrow clone.
        # However, the method still gets called from writesubtree() in
        # _addtree(), so we need to handle it. We should possibly make that
        # avoid calling add() with a clean manifest (_dirty is always False
        # in excludeddir instances).
        pass