##// END OF EJS Templates
manifest: remove `.new()` from the interface...
Augie Fackler -
r44734:c86256bd default
parent child Browse files
Show More
@@ -1,1986 +1,1978
1 # repository.py - Interfaces and base classes for repositories and peers.
1 # repository.py - Interfaces and base classes for repositories and peers.
2 #
2 #
3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from ..i18n import _
10 from ..i18n import _
11 from .. import error
11 from .. import error
12 from . import util as interfaceutil
12 from . import util as interfaceutil
13
13
# Requirement string for experimental narrow clones. Once narrowing is
# finalized and no longer subject to format changes, this should move to
# just "narrow" or similar.
NARROW_REQUIREMENT = b'narrowhg-experimental'

# Local repository feature strings.

# File storage is backed by revlogs.
REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
# The storage part of the repository is shared from an external source.
REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
# Backing file storage supports LFS.
REPO_FEATURE_LFS = b'lfs'
# The repository can be stream cloned.
REPO_FEATURE_STREAM_CLONE = b'streamclone'
# File storage may lack data for some ancestors.
REPO_FEATURE_SHALLOW_FILE_STORAGE = b'shallowfilestorage'

# Per-revision storage flag bits (stored in the upper bits of the flags
# field; see ``irevisiondelta.flags``).
REVISION_FLAG_CENSORED = 1 << 15
REVISION_FLAG_ELLIPSIS = 1 << 14
REVISION_FLAG_EXTSTORED = 1 << 13
REVISION_FLAG_SIDEDATA = 1 << 12

# Bitwise OR of every revision flag this interface understands.
REVISION_FLAGS_KNOWN = (
    REVISION_FLAG_CENSORED
    | REVISION_FLAG_ELLIPSIS
    | REVISION_FLAG_EXTSTORED
    | REVISION_FLAG_SIDEDATA
)

# Changegroup delta generation modes.
CG_DELTAMODE_STD = b'default'
CG_DELTAMODE_PREV = b'previous'
CG_DELTAMODE_FULL = b'fulltext'
CG_DELTAMODE_P1 = b'p1'
47
47
48
48
class ipeerconnection(interfaceutil.Interface):
    """Represents a "connection" to a repository.

    This is the base interface for representing a connection to a repository.
    It holds basic properties and methods applicable to all peer types.

    This is not a complete interface definition and should not be used
    outside of this module.
    """

    ui = interfaceutil.Attribute("""ui.ui instance""")

    def url():
        """Returns a URL string representing this peer.

        Currently, implementations expose the raw URL used to construct the
        instance. It may contain credentials as part of the URL. The
        expectations of the value aren't well-defined and this could lead to
        data leakage.

        TODO audit/clean consumers and more clearly define the contents of this
        value.
        """

    def local():
        """Returns a local repository instance.

        If the peer represents a local repository, returns an object that
        can be used to interface with it. Otherwise returns ``None``.
        """

    def peer():
        """Returns an object conforming to this interface.

        Most implementations will ``return self``.
        """

    def canpush():
        """Returns a boolean indicating if this peer can be pushed to."""

    def close():
        """Close the connection to this peer.

        This is called when the peer will no longer be used. Resources
        associated with the peer should be cleaned up.
        """
95
95
96
96
class ipeercapabilities(interfaceutil.Interface):
    """Peer sub-interface related to capabilities."""

    def capable(name):
        """Determine support for a named capability.

        Returns ``False`` if capability not supported.

        Returns ``True`` if boolean capability is supported. Returns a string
        if capability support is non-boolean.

        Capability strings may or may not map to wire protocol capabilities.
        """

    def requirecap(name, purpose):
        """Require a capability to be present.

        Raises a ``CapabilityError`` if the capability isn't present.
        """
116
116
117
117
class ipeercommands(interfaceutil.Interface):
    """Client-side interface for communicating over the wire protocol.

    This interface is used as a gateway to the Mercurial wire protocol.
    Methods commonly call wire protocol commands of the same name.
    """

    def branchmap():
        """Obtain heads in named branches.

        Returns a dict mapping branch name to an iterable of nodes that are
        heads on that branch.
        """

    def capabilities():
        """Obtain capabilities of the peer.

        Returns a set of string capabilities.
        """

    def clonebundles():
        """Obtains the clone bundles manifest for the repo.

        Returns the manifest as unparsed bytes.
        """

    def debugwireargs(one, two, three=None, four=None, five=None):
        """Used to facilitate debugging of arguments passed over the wire."""

    def getbundle(source, **kwargs):
        """Obtain remote repository data as a bundle.

        This command is how the bulk of repository data is transferred from
        the peer to the local repository.

        Returns a generator of bundle data.
        """

    def heads():
        """Determine all known head revisions in the peer.

        Returns an iterable of binary nodes.
        """

    def known(nodes):
        """Determine whether multiple nodes are known.

        Accepts an iterable of nodes whose presence to check for.

        Returns an iterable of booleans indicating whether the corresponding
        node at that index is known to the peer.
        """

    def listkeys(namespace):
        """Obtain all keys in a pushkey namespace.

        Returns an iterable of key names.
        """

    def lookup(key):
        """Resolve a value to a known revision.

        Returns a binary node of the resolved revision on success.
        """

    def pushkey(namespace, key, old, new):
        """Set a value using the ``pushkey`` protocol.

        Arguments correspond to the pushkey namespace and key to operate on and
        the old and new values for that key.

        Returns a string with the peer result. The value inside varies by the
        namespace.
        """

    def stream_out():
        """Obtain streaming clone data.

        Successful result should be a generator of data chunks.
        """

    def unbundle(bundle, heads, url):
        """Transfer repository data to the peer.

        This is how the bulk of data during a push is transferred.

        Returns the integer number of heads added to the peer.
        """
206
206
207
207
class ipeerlegacycommands(interfaceutil.Interface):
    """Interface for implementing support for legacy wire protocol commands.

    Wire protocol commands transition to legacy status when they are no longer
    used by modern clients. To facilitate identifying which commands are
    legacy, the interfaces are split.
    """

    def between(pairs):
        """Obtain nodes between pairs of nodes.

        ``pairs`` is an iterable of node pairs.

        Returns an iterable of iterables of nodes corresponding to each
        requested pair.
        """

    def branches(nodes):
        """Obtain ancestor changesets of specific nodes back to a branch point.

        For each requested node, the peer finds the first ancestor node that is
        a DAG root or is a merge.

        Returns an iterable of iterables with the resolved values for each node.
        """

    def changegroup(nodes, source):
        """Obtain a changegroup with data for descendants of specified nodes."""

    def changegroupsubset(bases, heads, source):
        # NOTE(review): left undocumented in the original. Presumably a
        # constrained form of ``changegroup()`` bounded by ``bases`` and
        # ``heads`` -- confirm against implementations before relying on
        # exact semantics.
        pass
239
239
240
240
class ipeercommandexecutor(interfaceutil.Interface):
    """Represents a mechanism to execute remote commands.

    This is the primary interface for requesting that wire protocol commands
    be executed. Instances of this interface are active in a context manager
    and have a well-defined lifetime. When the context manager exits, all
    outstanding requests are waited on.
    """

    def callcommand(name, args):
        """Request that a named command be executed.

        Receives the command name and a dictionary of command arguments.

        Returns a ``concurrent.futures.Future`` that will resolve to the
        result of that command request. That exact value is left up to
        the implementation and possibly varies by command.

        Not all commands can coexist with other commands in an executor
        instance: it depends on the underlying wire protocol transport being
        used and the command itself.

        Implementations MAY call ``sendcommands()`` automatically if the
        requested command cannot coexist with other commands in this executor.

        Implementations MAY call ``sendcommands()`` automatically when the
        future's ``result()`` is called. So, consumers using multiple
        commands with an executor MUST ensure that ``result()`` is not called
        until all command requests have been issued.
        """

    def sendcommands():
        """Trigger submission of queued command requests.

        Not all transports submit commands as soon as they are requested to
        run. When called, this method forces queued command requests to be
        issued. It will no-op if all commands have already been sent.

        When called, no more new commands may be issued with this executor.
        """

    def close():
        """Signal that this command request is finished.

        When called, no more new commands may be issued. All outstanding
        commands that have previously been issued are waited on before
        returning. This not only includes waiting for the futures to resolve,
        but also waiting for all response data to arrive. In other words,
        calling this waits for all on-wire state for issued command requests
        to finish.

        When used as a context manager, this method is called when exiting the
        context manager.

        This method may call ``sendcommands()`` if there are buffered commands.
        """
297
297
298
298
class ipeerrequests(interfaceutil.Interface):
    """Interface for executing commands on a peer."""

    limitedarguments = interfaceutil.Attribute(
        """True if the peer cannot receive large argument value for commands."""
    )

    def commandexecutor():
        """A context manager that resolves to an ipeercommandexecutor.

        The object this resolves to can be used to issue command requests
        to the peer.

        Callers should call its ``callcommand`` method to issue command
        requests.

        A new executor should be obtained for each distinct set of commands
        (possibly just a single command) that the consumer wants to execute
        as part of a single operation or round trip. This is because some
        peers are half-duplex and/or don't support persistent connections.
        e.g. in the case of HTTP peers, commands sent to an executor represent
        a single HTTP request. While some peers may support multiple command
        sends over the wire per executor, consumers need to code to the least
        capable peer. So it should be assumed that command executors buffer
        called commands until they are told to send them and that each
        command executor could result in a new connection or wire-level request
        being issued.
        """
327
327
328
328
class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
    """Unified interface for peer repositories.

    All peer instances must conform to this interface.
    """
334
334
335
335
class ipeerv2(ipeerconnection, ipeercapabilities, ipeerrequests):
    """Unified peer interface for wire protocol version 2 peers."""

    apidescriptor = interfaceutil.Attribute(
        """Data structure holding description of server API."""
    )
342
342
343
343
@interfaceutil.implementer(ipeerbase)
class peer(object):
    """Base class for peer repositories."""

    # By default peers accept arbitrarily large command argument values.
    limitedarguments = False

    def capable(self, name):
        """Determine support for capability ``name``.

        Returns ``True`` for a boolean capability, the string value for a
        capability advertised as ``name=value``, and ``False`` when the
        capability is absent.
        """
        caps = self.capabilities()
        if name in caps:
            return True

        # Valued capabilities are advertised in ``name=value`` form.
        prefix = b'%s=' % name
        for cap in caps:
            if cap.startswith(prefix):
                return cap[len(prefix):]

        return False

    def requirecap(self, name, purpose):
        """Raise ``error.CapabilityError`` unless ``name`` is supported.

        ``purpose`` is interpolated into the error message to describe
        the operation that needed the capability.
        """
        if not self.capable(name):
            raise error.CapabilityError(
                _(
                    b'cannot %s; remote repository does not support the '
                    b'\'%s\' capability'
                )
                % (purpose, name)
            )
373
373
374
374
class iverifyproblem(interfaceutil.Interface):
    """Represents a problem with the integrity of the repository.

    Instances of this interface are emitted to describe an integrity issue
    with a repository (e.g. corrupt storage, missing data, etc).

    Instances are essentially messages associated with severity.
    """

    warning = interfaceutil.Attribute(
        """Message indicating a non-fatal problem."""
    )

    error = interfaceutil.Attribute("""Message indicating a fatal problem.""")

    node = interfaceutil.Attribute(
        """Revision encountering the problem.

        ``None`` means the problem doesn't apply to a single revision.
        """
    )
396
396
397
397
class irevisiondelta(interfaceutil.Interface):
    """Represents a delta between one revision and another.

    Instances convey enough information to allow a revision to be exchanged
    with another repository.

    Instances represent the fulltext revision data or a delta against
    another revision. Therefore the ``revision`` and ``delta`` attributes
    are mutually exclusive.

    Typically used for changegroup generation.
    """

    node = interfaceutil.Attribute("""20 byte node of this revision.""")

    p1node = interfaceutil.Attribute(
        """20 byte node of 1st parent of this revision."""
    )

    p2node = interfaceutil.Attribute(
        """20 byte node of 2nd parent of this revision."""
    )

    linknode = interfaceutil.Attribute(
        """20 byte node of the changelog revision this node is linked to."""
    )

    flags = interfaceutil.Attribute(
        """2 bytes of integer flags that apply to this revision.

        This is a bitwise composition of the ``REVISION_FLAG_*`` constants.
        """
    )

    basenode = interfaceutil.Attribute(
        """20 byte node of the revision this data is a delta against.

        ``nullid`` indicates that the revision is a full revision and not
        a delta.
        """
    )

    baserevisionsize = interfaceutil.Attribute(
        """Size of base revision this delta is against.

        May be ``None`` if ``basenode`` is ``nullid``.
        """
    )

    revision = interfaceutil.Attribute(
        """Raw fulltext of revision data for this node."""
    )

    delta = interfaceutil.Attribute(
        """Delta between ``basenode`` and ``node``.

        Stored in the bdiff delta format.
        """
    )
457
457
458
458
class ifilerevisionssequence(interfaceutil.Interface):
    """Contains index data for all revisions of a file.

    Types implementing this behave like lists of tuples. The index
    in the list corresponds to the revision number. The values contain
    index metadata.

    The *null* revision (revision number -1) is always the last item
    in the index.
    """

    def __len__():
        """The total number of revisions."""

    def __getitem__(rev):
        """Returns the object having a specific revision number.

        Returns an 8-tuple with the following fields:

        offset+flags
           Contains the offset and flags for the revision. 64-bit unsigned
           integer where first 6 bytes are the offset and the next 2 bytes
           are flags. The offset can be 0 if it is not used by the store.
        compressed size
           Size of the revision data in the store. It can be 0 if it isn't
           needed by the store.
        uncompressed size
           Fulltext size. It can be 0 if it isn't needed by the store.
        base revision
           Revision number of revision the delta for storage is encoded
           against. -1 indicates not encoded against a base revision.
        link revision
           Revision number of changelog revision this entry is related to.
        p1 revision
           Revision number of 1st parent. -1 if no 1st parent.
        p2 revision
           Revision number of 2nd parent. -1 if no 2nd parent.
        node
           Binary node value for this revision number.

        Negative values should index off the end of the sequence. ``-1``
        should return the null revision. ``-2`` should return the most
        recent revision.
        """

    def __contains__(rev):
        """Whether a revision number exists."""

    # NOTE(review): unlike the other methods on this interface, this one
    # declares ``self`` explicitly; the inconsistency is preserved as-is.
    def insert(self, i, entry):
        """Add an item to the index at specific revision."""
509
509
510
510
511 class ifileindex(interfaceutil.Interface):
511 class ifileindex(interfaceutil.Interface):
512 """Storage interface for index data of a single file.
512 """Storage interface for index data of a single file.
513
513
514 File storage data is divided into index metadata and data storage.
514 File storage data is divided into index metadata and data storage.
515 This interface defines the index portion of the interface.
515 This interface defines the index portion of the interface.
516
516
517 The index logically consists of:
517 The index logically consists of:
518
518
519 * A mapping between revision numbers and nodes.
519 * A mapping between revision numbers and nodes.
520 * DAG data (storing and querying the relationship between nodes).
520 * DAG data (storing and querying the relationship between nodes).
521 * Metadata to facilitate storage.
521 * Metadata to facilitate storage.
522 """
522 """
523
523
524 def __len__():
524 def __len__():
525 """Obtain the number of revisions stored for this file."""
525 """Obtain the number of revisions stored for this file."""
526
526
527 def __iter__():
527 def __iter__():
528 """Iterate over revision numbers for this file."""
528 """Iterate over revision numbers for this file."""
529
529
530 def hasnode(node):
530 def hasnode(node):
531 """Returns a bool indicating if a node is known to this store.
531 """Returns a bool indicating if a node is known to this store.
532
532
533 Implementations must only return True for full, binary node values:
533 Implementations must only return True for full, binary node values:
534 hex nodes, revision numbers, and partial node matches must be
534 hex nodes, revision numbers, and partial node matches must be
535 rejected.
535 rejected.
536
536
537 The null node is never present.
537 The null node is never present.
538 """
538 """
539
539
540 def revs(start=0, stop=None):
540 def revs(start=0, stop=None):
541 """Iterate over revision numbers for this file, with control."""
541 """Iterate over revision numbers for this file, with control."""
542
542
543 def parents(node):
543 def parents(node):
544 """Returns a 2-tuple of parent nodes for a revision.
544 """Returns a 2-tuple of parent nodes for a revision.
545
545
546 Values will be ``nullid`` if the parent is empty.
546 Values will be ``nullid`` if the parent is empty.
547 """
547 """
548
548
549 def parentrevs(rev):
549 def parentrevs(rev):
550 """Like parents() but operates on revision numbers."""
550 """Like parents() but operates on revision numbers."""
551
551
552 def rev(node):
552 def rev(node):
553 """Obtain the revision number given a node.
553 """Obtain the revision number given a node.
554
554
555 Raises ``error.LookupError`` if the node is not known.
555 Raises ``error.LookupError`` if the node is not known.
556 """
556 """
557
557
558 def node(rev):
558 def node(rev):
559 """Obtain the node value given a revision number.
559 """Obtain the node value given a revision number.
560
560
561 Raises ``IndexError`` if the node is not known.
561 Raises ``IndexError`` if the node is not known.
562 """
562 """
563
563
564 def lookup(node):
564 def lookup(node):
565 """Attempt to resolve a value to a node.
565 """Attempt to resolve a value to a node.
566
566
567 Value can be a binary node, hex node, revision number, or a string
567 Value can be a binary node, hex node, revision number, or a string
568 that can be converted to an integer.
568 that can be converted to an integer.
569
569
570 Raises ``error.LookupError`` if a node could not be resolved.
570 Raises ``error.LookupError`` if a node could not be resolved.
571 """
571 """
572
572
573 def linkrev(rev):
573 def linkrev(rev):
574 """Obtain the changeset revision number a revision is linked to."""
574 """Obtain the changeset revision number a revision is linked to."""
575
575
576 def iscensored(rev):
576 def iscensored(rev):
577 """Return whether a revision's content has been censored."""
577 """Return whether a revision's content has been censored."""
578
578
579 def commonancestorsheads(node1, node2):
579 def commonancestorsheads(node1, node2):
580 """Obtain an iterable of nodes containing heads of common ancestors.
580 """Obtain an iterable of nodes containing heads of common ancestors.
581
581
582 See ``ancestor.commonancestorsheads()``.
582 See ``ancestor.commonancestorsheads()``.
583 """
583 """
584
584
585 def descendants(revs):
585 def descendants(revs):
586 """Obtain descendant revision numbers for a set of revision numbers.
586 """Obtain descendant revision numbers for a set of revision numbers.
587
587
588 If ``nullrev`` is in the set, this is equivalent to ``revs()``.
588 If ``nullrev`` is in the set, this is equivalent to ``revs()``.
589 """
589 """
590
590
591 def heads(start=None, stop=None):
591 def heads(start=None, stop=None):
592 """Obtain a list of nodes that are DAG heads, with control.
592 """Obtain a list of nodes that are DAG heads, with control.
593
593
594 The set of revisions examined can be limited by specifying
594 The set of revisions examined can be limited by specifying
595 ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
595 ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
596 iterable of nodes. DAG traversal starts at earlier revision
596 iterable of nodes. DAG traversal starts at earlier revision
597 ``start`` and iterates forward until any node in ``stop`` is
597 ``start`` and iterates forward until any node in ``stop`` is
598 encountered.
598 encountered.
599 """
599 """
600
600
601 def children(node):
601 def children(node):
602 """Obtain nodes that are children of a node.
602 """Obtain nodes that are children of a node.
603
603
604 Returns a list of nodes.
604 Returns a list of nodes.
605 """
605 """
606
606
607
607
608 class ifiledata(interfaceutil.Interface):
608 class ifiledata(interfaceutil.Interface):
609 """Storage interface for data storage of a specific file.
609 """Storage interface for data storage of a specific file.
610
610
611 This complements ``ifileindex`` and provides an interface for accessing
611 This complements ``ifileindex`` and provides an interface for accessing
612 data for a tracked file.
612 data for a tracked file.
613 """
613 """
614
614
615 def size(rev):
615 def size(rev):
616 """Obtain the fulltext size of file data.
616 """Obtain the fulltext size of file data.
617
617
618 Any metadata is excluded from size measurements.
618 Any metadata is excluded from size measurements.
619 """
619 """
620
620
621 def revision(node, raw=False):
621 def revision(node, raw=False):
622 """"Obtain fulltext data for a node.
622 """"Obtain fulltext data for a node.
623
623
624 By default, any storage transformations are applied before the data
624 By default, any storage transformations are applied before the data
625 is returned. If ``raw`` is True, non-raw storage transformations
625 is returned. If ``raw`` is True, non-raw storage transformations
626 are not applied.
626 are not applied.
627
627
628 The fulltext data may contain a header containing metadata. Most
628 The fulltext data may contain a header containing metadata. Most
629 consumers should use ``read()`` to obtain the actual file data.
629 consumers should use ``read()`` to obtain the actual file data.
630 """
630 """
631
631
632 def rawdata(node):
632 def rawdata(node):
633 """Obtain raw data for a node.
633 """Obtain raw data for a node.
634 """
634 """
635
635
636 def read(node):
636 def read(node):
637 """Resolve file fulltext data.
637 """Resolve file fulltext data.
638
638
639 This is similar to ``revision()`` except any metadata in the data
639 This is similar to ``revision()`` except any metadata in the data
640 headers is stripped.
640 headers is stripped.
641 """
641 """
642
642
643 def renamed(node):
643 def renamed(node):
644 """Obtain copy metadata for a node.
644 """Obtain copy metadata for a node.
645
645
646 Returns ``False`` if no copy metadata is stored or a 2-tuple of
646 Returns ``False`` if no copy metadata is stored or a 2-tuple of
647 (path, node) from which this revision was copied.
647 (path, node) from which this revision was copied.
648 """
648 """
649
649
650 def cmp(node, fulltext):
650 def cmp(node, fulltext):
651 """Compare fulltext to another revision.
651 """Compare fulltext to another revision.
652
652
653 Returns True if the fulltext is different from what is stored.
653 Returns True if the fulltext is different from what is stored.
654
654
655 This takes copy metadata into account.
655 This takes copy metadata into account.
656
656
657 TODO better document the copy metadata and censoring logic.
657 TODO better document the copy metadata and censoring logic.
658 """
658 """
659
659
660 def emitrevisions(
660 def emitrevisions(
661 nodes,
661 nodes,
662 nodesorder=None,
662 nodesorder=None,
663 revisiondata=False,
663 revisiondata=False,
664 assumehaveparentrevisions=False,
664 assumehaveparentrevisions=False,
665 deltamode=CG_DELTAMODE_STD,
665 deltamode=CG_DELTAMODE_STD,
666 ):
666 ):
667 """Produce ``irevisiondelta`` for revisions.
667 """Produce ``irevisiondelta`` for revisions.
668
668
669 Given an iterable of nodes, emits objects conforming to the
669 Given an iterable of nodes, emits objects conforming to the
670 ``irevisiondelta`` interface that describe revisions in storage.
670 ``irevisiondelta`` interface that describe revisions in storage.
671
671
672 This method is a generator.
672 This method is a generator.
673
673
674 The input nodes may be unordered. Implementations must ensure that a
674 The input nodes may be unordered. Implementations must ensure that a
675 node's parents are emitted before the node itself. Transitively, this
675 node's parents are emitted before the node itself. Transitively, this
676 means that a node may only be emitted once all its ancestors in
676 means that a node may only be emitted once all its ancestors in
677 ``nodes`` have also been emitted.
677 ``nodes`` have also been emitted.
678
678
679 By default, emits "index" data (the ``node``, ``p1node``, and
679 By default, emits "index" data (the ``node``, ``p1node``, and
680 ``p2node`` attributes). If ``revisiondata`` is set, revision data
680 ``p2node`` attributes). If ``revisiondata`` is set, revision data
681 will also be present on the emitted objects.
681 will also be present on the emitted objects.
682
682
683 With default argument values, implementations can choose to emit
683 With default argument values, implementations can choose to emit
684 either fulltext revision data or a delta. When emitting deltas,
684 either fulltext revision data or a delta. When emitting deltas,
685 implementations must consider whether the delta's base revision
685 implementations must consider whether the delta's base revision
686 fulltext is available to the receiver.
686 fulltext is available to the receiver.
687
687
688 The base revision fulltext is guaranteed to be available if any of
688 The base revision fulltext is guaranteed to be available if any of
689 the following are met:
689 the following are met:
690
690
691 * Its fulltext revision was emitted by this method call.
691 * Its fulltext revision was emitted by this method call.
692 * A delta for that revision was emitted by this method call.
692 * A delta for that revision was emitted by this method call.
693 * ``assumehaveparentrevisions`` is True and the base revision is a
693 * ``assumehaveparentrevisions`` is True and the base revision is a
694 parent of the node.
694 parent of the node.
695
695
696 ``nodesorder`` can be used to control the order that revisions are
696 ``nodesorder`` can be used to control the order that revisions are
697 emitted. By default, revisions can be reordered as long as they are
697 emitted. By default, revisions can be reordered as long as they are
698 in DAG topological order (see above). If the value is ``nodes``,
698 in DAG topological order (see above). If the value is ``nodes``,
699 the iteration order from ``nodes`` should be used. If the value is
699 the iteration order from ``nodes`` should be used. If the value is
700 ``storage``, then the native order from the backing storage layer
700 ``storage``, then the native order from the backing storage layer
701 is used. (Not all storage layers will have strong ordering and behavior
701 is used. (Not all storage layers will have strong ordering and behavior
702 of this mode is storage-dependent.) ``nodes`` ordering can force
702 of this mode is storage-dependent.) ``nodes`` ordering can force
703 revisions to be emitted before their ancestors, so consumers should
703 revisions to be emitted before their ancestors, so consumers should
704 use it with care.
704 use it with care.
705
705
706 The ``linknode`` attribute on the returned ``irevisiondelta`` may not
706 The ``linknode`` attribute on the returned ``irevisiondelta`` may not
707 be set and it is the caller's responsibility to resolve it, if needed.
707 be set and it is the caller's responsibility to resolve it, if needed.
708
708
709 If ``deltamode`` is CG_DELTAMODE_PREV and revision data is requested,
709 If ``deltamode`` is CG_DELTAMODE_PREV and revision data is requested,
710 all revision data should be emitted as deltas against the revision
710 all revision data should be emitted as deltas against the revision
711 emitted just prior. The initial revision should be a delta against its
711 emitted just prior. The initial revision should be a delta against its
712 1st parent.
712 1st parent.
713 """
713 """
714
714
715
715
716 class ifilemutation(interfaceutil.Interface):
716 class ifilemutation(interfaceutil.Interface):
717 """Storage interface for mutation events of a tracked file."""
717 """Storage interface for mutation events of a tracked file."""
718
718
719 def add(filedata, meta, transaction, linkrev, p1, p2):
719 def add(filedata, meta, transaction, linkrev, p1, p2):
720 """Add a new revision to the store.
720 """Add a new revision to the store.
721
721
722 Takes file data, dictionary of metadata, a transaction, linkrev,
722 Takes file data, dictionary of metadata, a transaction, linkrev,
723 and parent nodes.
723 and parent nodes.
724
724
725 Returns the node that was added.
725 Returns the node that was added.
726
726
727 May no-op if a revision matching the supplied data is already stored.
727 May no-op if a revision matching the supplied data is already stored.
728 """
728 """
729
729
730 def addrevision(
730 def addrevision(
731 revisiondata,
731 revisiondata,
732 transaction,
732 transaction,
733 linkrev,
733 linkrev,
734 p1,
734 p1,
735 p2,
735 p2,
736 node=None,
736 node=None,
737 flags=0,
737 flags=0,
738 cachedelta=None,
738 cachedelta=None,
739 ):
739 ):
740 """Add a new revision to the store.
740 """Add a new revision to the store.
741
741
742 This is similar to ``add()`` except it operates at a lower level.
742 This is similar to ``add()`` except it operates at a lower level.
743
743
744 The data passed in already contains a metadata header, if any.
744 The data passed in already contains a metadata header, if any.
745
745
746 ``node`` and ``flags`` can be used to define the expected node and
746 ``node`` and ``flags`` can be used to define the expected node and
747 the flags to use with storage. ``flags`` is a bitwise value composed
747 the flags to use with storage. ``flags`` is a bitwise value composed
748 of the various ``REVISION_FLAG_*`` constants.
748 of the various ``REVISION_FLAG_*`` constants.
749
749
750 ``add()`` is usually called when adding files from e.g. the working
750 ``add()`` is usually called when adding files from e.g. the working
751 directory. ``addrevision()`` is often called by ``add()`` and for
751 directory. ``addrevision()`` is often called by ``add()`` and for
752 scenarios where revision data has already been computed, such as when
752 scenarios where revision data has already been computed, such as when
753 applying raw data from a peer repo.
753 applying raw data from a peer repo.
754 """
754 """
755
755
756 def addgroup(
756 def addgroup(
757 deltas,
757 deltas,
758 linkmapper,
758 linkmapper,
759 transaction,
759 transaction,
760 addrevisioncb=None,
760 addrevisioncb=None,
761 maybemissingparents=False,
761 maybemissingparents=False,
762 ):
762 ):
763 """Process a series of deltas for storage.
763 """Process a series of deltas for storage.
764
764
765 ``deltas`` is an iterable of 7-tuples of
765 ``deltas`` is an iterable of 7-tuples of
766 (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
766 (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
767 to add.
767 to add.
768
768
769 The ``delta`` field contains ``mpatch`` data to apply to a base
769 The ``delta`` field contains ``mpatch`` data to apply to a base
770 revision, identified by ``deltabase``. The base node can be
770 revision, identified by ``deltabase``. The base node can be
771 ``nullid``, in which case the header from the delta can be ignored
771 ``nullid``, in which case the header from the delta can be ignored
772 and the delta used as the fulltext.
772 and the delta used as the fulltext.
773
773
774 ``addrevisioncb`` should be called for each node as it is committed.
774 ``addrevisioncb`` should be called for each node as it is committed.
775
775
776 ``maybemissingparents`` is a bool indicating whether the incoming
776 ``maybemissingparents`` is a bool indicating whether the incoming
777 data may reference parents/ancestor revisions that aren't present.
777 data may reference parents/ancestor revisions that aren't present.
778 This flag is set when receiving data into a "shallow" store that
778 This flag is set when receiving data into a "shallow" store that
779 doesn't hold all history.
779 doesn't hold all history.
780
780
781 Returns a list of nodes that were processed. A node will be in the list
781 Returns a list of nodes that were processed. A node will be in the list
782 even if it existed in the store previously.
782 even if it existed in the store previously.
783 """
783 """
784
784
785 def censorrevision(tr, node, tombstone=b''):
785 def censorrevision(tr, node, tombstone=b''):
786 """Remove the content of a single revision.
786 """Remove the content of a single revision.
787
787
788 The specified ``node`` will have its content purged from storage.
788 The specified ``node`` will have its content purged from storage.
789 Future attempts to access the revision data for this node will
789 Future attempts to access the revision data for this node will
790 result in failure.
790 result in failure.
791
791
792 A ``tombstone`` message can optionally be stored. This message may be
792 A ``tombstone`` message can optionally be stored. This message may be
793 displayed to users when they attempt to access the missing revision
793 displayed to users when they attempt to access the missing revision
794 data.
794 data.
795
795
796 Storage backends may have stored deltas against the previous content
796 Storage backends may have stored deltas against the previous content
797 in this revision. As part of censoring a revision, these storage
797 in this revision. As part of censoring a revision, these storage
798 backends are expected to rewrite any internally stored deltas such
798 backends are expected to rewrite any internally stored deltas such
799 that they no longer reference the deleted content.
799 that they no longer reference the deleted content.
800 """
800 """
801
801
802 def getstrippoint(minlink):
802 def getstrippoint(minlink):
803 """Find the minimum revision that must be stripped to strip a linkrev.
803 """Find the minimum revision that must be stripped to strip a linkrev.
804
804
805 Returns a 2-tuple containing the minimum revision number and a set
805 Returns a 2-tuple containing the minimum revision number and a set
806 of all revisions numbers that would be broken by this strip.
806 of all revisions numbers that would be broken by this strip.
807
807
808 TODO this is highly revlog centric and should be abstracted into
808 TODO this is highly revlog centric and should be abstracted into
809 a higher-level deletion API. ``repair.strip()`` relies on this.
809 a higher-level deletion API. ``repair.strip()`` relies on this.
810 """
810 """
811
811
812 def strip(minlink, transaction):
812 def strip(minlink, transaction):
813 """Remove storage of items starting at a linkrev.
813 """Remove storage of items starting at a linkrev.
814
814
815 This uses ``getstrippoint()`` to determine the first node to remove.
815 This uses ``getstrippoint()`` to determine the first node to remove.
816 Then it effectively truncates storage for all revisions after that.
816 Then it effectively truncates storage for all revisions after that.
817
817
818 TODO this is highly revlog centric and should be abstracted into a
818 TODO this is highly revlog centric and should be abstracted into a
819 higher-level deletion API.
819 higher-level deletion API.
820 """
820 """
821
821
822
822
823 class ifilestorage(ifileindex, ifiledata, ifilemutation):
823 class ifilestorage(ifileindex, ifiledata, ifilemutation):
824 """Complete storage interface for a single tracked file."""
824 """Complete storage interface for a single tracked file."""
825
825
826 def files():
826 def files():
827 """Obtain paths that are backing storage for this file.
827 """Obtain paths that are backing storage for this file.
828
828
829 TODO this is used heavily by verify code and there should probably
829 TODO this is used heavily by verify code and there should probably
830 be a better API for that.
830 be a better API for that.
831 """
831 """
832
832
833 def storageinfo(
833 def storageinfo(
834 exclusivefiles=False,
834 exclusivefiles=False,
835 sharedfiles=False,
835 sharedfiles=False,
836 revisionscount=False,
836 revisionscount=False,
837 trackedsize=False,
837 trackedsize=False,
838 storedsize=False,
838 storedsize=False,
839 ):
839 ):
840 """Obtain information about storage for this file's data.
840 """Obtain information about storage for this file's data.
841
841
842 Returns a dict describing storage for this tracked path. The keys
842 Returns a dict describing storage for this tracked path. The keys
843 in the dict map to arguments of the same. The arguments are bools
843 in the dict map to arguments of the same. The arguments are bools
844 indicating whether to calculate and obtain that data.
844 indicating whether to calculate and obtain that data.
845
845
846 exclusivefiles
846 exclusivefiles
847 Iterable of (vfs, path) describing files that are exclusively
847 Iterable of (vfs, path) describing files that are exclusively
848 used to back storage for this tracked path.
848 used to back storage for this tracked path.
849
849
850 sharedfiles
850 sharedfiles
851 Iterable of (vfs, path) describing files that are used to back
851 Iterable of (vfs, path) describing files that are used to back
852 storage for this tracked path. Those files may also provide storage
852 storage for this tracked path. Those files may also provide storage
853 for other stored entities.
853 for other stored entities.
854
854
855 revisionscount
855 revisionscount
856 Number of revisions available for retrieval.
856 Number of revisions available for retrieval.
857
857
858 trackedsize
858 trackedsize
859 Total size in bytes of all tracked revisions. This is a sum of the
859 Total size in bytes of all tracked revisions. This is a sum of the
860 length of the fulltext of all revisions.
860 length of the fulltext of all revisions.
861
861
862 storedsize
862 storedsize
863 Total size in bytes used to store data for all tracked revisions.
863 Total size in bytes used to store data for all tracked revisions.
864 This is commonly less than ``trackedsize`` due to internal usage
864 This is commonly less than ``trackedsize`` due to internal usage
865 of deltas rather than fulltext revisions.
865 of deltas rather than fulltext revisions.
866
866
867 Not all storage backends may support all queries are have a reasonable
867 Not all storage backends may support all queries are have a reasonable
868 value to use. In that case, the value should be set to ``None`` and
868 value to use. In that case, the value should be set to ``None`` and
869 callers are expected to handle this special value.
869 callers are expected to handle this special value.
870 """
870 """
871
871
872 def verifyintegrity(state):
872 def verifyintegrity(state):
873 """Verifies the integrity of file storage.
873 """Verifies the integrity of file storage.
874
874
875 ``state`` is a dict holding state of the verifier process. It can be
875 ``state`` is a dict holding state of the verifier process. It can be
876 used to communicate data between invocations of multiple storage
876 used to communicate data between invocations of multiple storage
877 primitives.
877 primitives.
878
878
879 If individual revisions cannot have their revision content resolved,
879 If individual revisions cannot have their revision content resolved,
880 the method is expected to set the ``skipread`` key to a set of nodes
880 the method is expected to set the ``skipread`` key to a set of nodes
881 that encountered problems. If set, the method can also add the node(s)
881 that encountered problems. If set, the method can also add the node(s)
882 to ``safe_renamed`` in order to indicate nodes that may perform the
882 to ``safe_renamed`` in order to indicate nodes that may perform the
883 rename checks with currently accessible data.
883 rename checks with currently accessible data.
884
884
885 The method yields objects conforming to the ``iverifyproblem``
885 The method yields objects conforming to the ``iverifyproblem``
886 interface.
886 interface.
887 """
887 """
888
888
889
889
890 class idirs(interfaceutil.Interface):
890 class idirs(interfaceutil.Interface):
891 """Interface representing a collection of directories from paths.
891 """Interface representing a collection of directories from paths.
892
892
893 This interface is essentially a derived data structure representing
893 This interface is essentially a derived data structure representing
894 directories from a collection of paths.
894 directories from a collection of paths.
895 """
895 """
896
896
897 def addpath(path):
897 def addpath(path):
898 """Add a path to the collection.
898 """Add a path to the collection.
899
899
900 All directories in the path will be added to the collection.
900 All directories in the path will be added to the collection.
901 """
901 """
902
902
903 def delpath(path):
903 def delpath(path):
904 """Remove a path from the collection.
904 """Remove a path from the collection.
905
905
906 If the removal was the last path in a particular directory, the
906 If the removal was the last path in a particular directory, the
907 directory is removed from the collection.
907 directory is removed from the collection.
908 """
908 """
909
909
910 def __iter__():
910 def __iter__():
911 """Iterate over the directories in this collection of paths."""
911 """Iterate over the directories in this collection of paths."""
912
912
913 def __contains__(path):
913 def __contains__(path):
914 """Whether a specific directory is in this collection."""
914 """Whether a specific directory is in this collection."""
915
915
916
916
917 class imanifestdict(interfaceutil.Interface):
917 class imanifestdict(interfaceutil.Interface):
918 """Interface representing a manifest data structure.
918 """Interface representing a manifest data structure.
919
919
920 A manifest is effectively a dict mapping paths to entries. Each entry
920 A manifest is effectively a dict mapping paths to entries. Each entry
921 consists of a binary node and extra flags affecting that entry.
921 consists of a binary node and extra flags affecting that entry.
922 """
922 """
923
923
924 def __getitem__(path):
924 def __getitem__(path):
925 """Returns the binary node value for a path in the manifest.
925 """Returns the binary node value for a path in the manifest.
926
926
927 Raises ``KeyError`` if the path does not exist in the manifest.
927 Raises ``KeyError`` if the path does not exist in the manifest.
928
928
929 Equivalent to ``self.find(path)[0]``.
929 Equivalent to ``self.find(path)[0]``.
930 """
930 """
931
931
932 def find(path):
932 def find(path):
933 """Returns the entry for a path in the manifest.
933 """Returns the entry for a path in the manifest.
934
934
935 Returns a 2-tuple of (node, flags).
935 Returns a 2-tuple of (node, flags).
936
936
937 Raises ``KeyError`` if the path does not exist in the manifest.
937 Raises ``KeyError`` if the path does not exist in the manifest.
938 """
938 """
939
939
940 def __len__():
940 def __len__():
941 """Return the number of entries in the manifest."""
941 """Return the number of entries in the manifest."""
942
942
943 def __nonzero__():
943 def __nonzero__():
944 """Returns True if the manifest has entries, False otherwise."""
944 """Returns True if the manifest has entries, False otherwise."""
945
945
946 __bool__ = __nonzero__
946 __bool__ = __nonzero__
947
947
948 def __setitem__(path, node):
948 def __setitem__(path, node):
949 """Define the node value for a path in the manifest.
949 """Define the node value for a path in the manifest.
950
950
951 If the path is already in the manifest, its flags will be copied to
951 If the path is already in the manifest, its flags will be copied to
952 the new entry.
952 the new entry.
953 """
953 """
954
954
955 def __contains__(path):
955 def __contains__(path):
956 """Whether a path exists in the manifest."""
956 """Whether a path exists in the manifest."""
957
957
958 def __delitem__(path):
958 def __delitem__(path):
959 """Remove a path from the manifest.
959 """Remove a path from the manifest.
960
960
961 Raises ``KeyError`` if the path is not in the manifest.
961 Raises ``KeyError`` if the path is not in the manifest.
962 """
962 """
963
963
964 def __iter__():
964 def __iter__():
965 """Iterate over paths in the manifest."""
965 """Iterate over paths in the manifest."""
966
966
967 def iterkeys():
967 def iterkeys():
968 """Iterate over paths in the manifest."""
968 """Iterate over paths in the manifest."""
969
969
970 def keys():
970 def keys():
971 """Obtain a list of paths in the manifest."""
971 """Obtain a list of paths in the manifest."""
972
972
973 def filesnotin(other, match=None):
973 def filesnotin(other, match=None):
974 """Obtain the set of paths in this manifest but not in another.
974 """Obtain the set of paths in this manifest but not in another.
975
975
976 ``match`` is an optional matcher function to be applied to both
976 ``match`` is an optional matcher function to be applied to both
977 manifests.
977 manifests.
978
978
979 Returns a set of paths.
979 Returns a set of paths.
980 """
980 """
981
981
982 def dirs():
982 def dirs():
983 """Returns an object implementing the ``idirs`` interface."""
983 """Returns an object implementing the ``idirs`` interface."""
984
984
985 def hasdir(dir):
985 def hasdir(dir):
986 """Returns a bool indicating if a directory is in this manifest."""
986 """Returns a bool indicating if a directory is in this manifest."""
987
987
988 def matches(match):
988 def matches(match):
989 """Generate a new manifest filtered through a matcher.
989 """Generate a new manifest filtered through a matcher.
990
990
991 Returns an object conforming to the ``imanifestdict`` interface.
991 Returns an object conforming to the ``imanifestdict`` interface.
992 """
992 """
993
993
994 def walk(match):
994 def walk(match):
995 """Generator of paths in manifest satisfying a matcher.
995 """Generator of paths in manifest satisfying a matcher.
996
996
997 This is equivalent to ``self.matches(match).iterkeys()`` except a new
997 This is equivalent to ``self.matches(match).iterkeys()`` except a new
998 manifest object is not created.
998 manifest object is not created.
999
999
1000 If the matcher has explicit files listed and they don't exist in
1000 If the matcher has explicit files listed and they don't exist in
1001 the manifest, ``match.bad()`` is called for each missing file.
1001 the manifest, ``match.bad()`` is called for each missing file.
1002 """
1002 """
1003
1003
1004 def diff(other, match=None, clean=False):
1004 def diff(other, match=None, clean=False):
1005 """Find differences between this manifest and another.
1005 """Find differences between this manifest and another.
1006
1006
1007 This manifest is compared to ``other``.
1007 This manifest is compared to ``other``.
1008
1008
1009 If ``match`` is provided, the two manifests are filtered against this
1009 If ``match`` is provided, the two manifests are filtered against this
1010 matcher and only entries satisfying the matcher are compared.
1010 matcher and only entries satisfying the matcher are compared.
1011
1011
1012 If ``clean`` is True, unchanged files are included in the returned
1012 If ``clean`` is True, unchanged files are included in the returned
1013 object.
1013 object.
1014
1014
1015 Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
1015 Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
1016 the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
1016 the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
1017 represents the node and flags for this manifest and ``(node2, flag2)``
1017 represents the node and flags for this manifest and ``(node2, flag2)``
1018 are the same for the other manifest.
1018 are the same for the other manifest.
1019 """
1019 """
1020
1020
1021 def setflag(path, flag):
1021 def setflag(path, flag):
1022 """Set the flag value for a given path.
1022 """Set the flag value for a given path.
1023
1023
1024 Raises ``KeyError`` if the path is not already in the manifest.
1024 Raises ``KeyError`` if the path is not already in the manifest.
1025 """
1025 """
1026
1026
1027 def get(path, default=None):
1027 def get(path, default=None):
1028 """Obtain the node value for a path or a default value if missing."""
1028 """Obtain the node value for a path or a default value if missing."""
1029
1029
1030 def flags(path):
1030 def flags(path):
1031 """Return the flags value for a path (default: empty bytestring)."""
1031 """Return the flags value for a path (default: empty bytestring)."""
1032
1032
1033 def copy():
1033 def copy():
1034 """Return a copy of this manifest."""
1034 """Return a copy of this manifest."""
1035
1035
1036 def items():
1036 def items():
1037 """Returns an iterable of (path, node) for items in this manifest."""
1037 """Returns an iterable of (path, node) for items in this manifest."""
1038
1038
1039 def iteritems():
1039 def iteritems():
1040 """Identical to items()."""
1040 """Identical to items()."""
1041
1041
1042 def iterentries():
1042 def iterentries():
1043 """Returns an iterable of (path, node, flags) for this manifest.
1043 """Returns an iterable of (path, node, flags) for this manifest.
1044
1044
1045 Similar to ``iteritems()`` except items are a 3-tuple and include
1045 Similar to ``iteritems()`` except items are a 3-tuple and include
1046 flags.
1046 flags.
1047 """
1047 """
1048
1048
1049 def text():
1049 def text():
1050 """Obtain the raw data representation for this manifest.
1050 """Obtain the raw data representation for this manifest.
1051
1051
1052 Result is used to create a manifest revision.
1052 Result is used to create a manifest revision.
1053 """
1053 """
1054
1054
1055 def fastdelta(base, changes):
1055 def fastdelta(base, changes):
1056 """Obtain a delta between this manifest and another given changes.
1056 """Obtain a delta between this manifest and another given changes.
1057
1057
1058 ``base`` in the raw data representation for another manifest.
1058 ``base`` in the raw data representation for another manifest.
1059
1059
1060 ``changes`` is an iterable of ``(path, to_delete)``.
1060 ``changes`` is an iterable of ``(path, to_delete)``.
1061
1061
1062 Returns a 2-tuple containing ``bytearray(self.text())`` and the
1062 Returns a 2-tuple containing ``bytearray(self.text())`` and the
1063 delta between ``base`` and this manifest.
1063 delta between ``base`` and this manifest.
1064 """
1064 """
1065
1065
1066
1066
1067 class imanifestrevisionbase(interfaceutil.Interface):
1067 class imanifestrevisionbase(interfaceutil.Interface):
1068 """Base interface representing a single revision of a manifest.
1068 """Base interface representing a single revision of a manifest.
1069
1069
1070 Should not be used as a primary interface: should always be inherited
1070 Should not be used as a primary interface: should always be inherited
1071 as part of a larger interface.
1071 as part of a larger interface.
1072 """
1072 """
1073
1073
1074 def new():
1075 """Obtain a new manifest instance.
1076
1077 Returns an object conforming to the ``imanifestrevisionwritable``
1078 interface. The instance will be associated with the same
1079 ``imanifestlog`` collection as this instance.
1080 """
1081
1082 def copy():
1074 def copy():
1083 """Obtain a copy of this manifest instance.
1075 """Obtain a copy of this manifest instance.
1084
1076
1085 Returns an object conforming to the ``imanifestrevisionwritable``
1077 Returns an object conforming to the ``imanifestrevisionwritable``
1086 interface. The instance will be associated with the same
1078 interface. The instance will be associated with the same
1087 ``imanifestlog`` collection as this instance.
1079 ``imanifestlog`` collection as this instance.
1088 """
1080 """
1089
1081
1090 def read():
1082 def read():
1091 """Obtain the parsed manifest data structure.
1083 """Obtain the parsed manifest data structure.
1092
1084
1093 The returned object conforms to the ``imanifestdict`` interface.
1085 The returned object conforms to the ``imanifestdict`` interface.
1094 """
1086 """
1095
1087
1096
1088
1097 class imanifestrevisionstored(imanifestrevisionbase):
1089 class imanifestrevisionstored(imanifestrevisionbase):
1098 """Interface representing a manifest revision committed to storage."""
1090 """Interface representing a manifest revision committed to storage."""
1099
1091
1100 def node():
1092 def node():
1101 """The binary node for this manifest."""
1093 """The binary node for this manifest."""
1102
1094
1103 parents = interfaceutil.Attribute(
1095 parents = interfaceutil.Attribute(
1104 """List of binary nodes that are parents for this manifest revision."""
1096 """List of binary nodes that are parents for this manifest revision."""
1105 )
1097 )
1106
1098
1107 def readdelta(shallow=False):
1099 def readdelta(shallow=False):
1108 """Obtain the manifest data structure representing changes from parent.
1100 """Obtain the manifest data structure representing changes from parent.
1109
1101
1110 This manifest is compared to its 1st parent. A new manifest representing
1102 This manifest is compared to its 1st parent. A new manifest representing
1111 those differences is constructed.
1103 those differences is constructed.
1112
1104
1113 The returned object conforms to the ``imanifestdict`` interface.
1105 The returned object conforms to the ``imanifestdict`` interface.
1114 """
1106 """
1115
1107
1116 def readfast(shallow=False):
1108 def readfast(shallow=False):
1117 """Calls either ``read()`` or ``readdelta()``.
1109 """Calls either ``read()`` or ``readdelta()``.
1118
1110
1119 The faster of the two options is called.
1111 The faster of the two options is called.
1120 """
1112 """
1121
1113
1122 def find(key):
1114 def find(key):
1123 """Calls self.read().find(key)``.
1115 """Calls self.read().find(key)``.
1124
1116
1125 Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
1117 Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
1126 """
1118 """
1127
1119
1128
1120
1129 class imanifestrevisionwritable(imanifestrevisionbase):
1121 class imanifestrevisionwritable(imanifestrevisionbase):
1130 """Interface representing a manifest revision that can be committed."""
1122 """Interface representing a manifest revision that can be committed."""
1131
1123
1132 def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
1124 def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
1133 """Add this revision to storage.
1125 """Add this revision to storage.
1134
1126
1135 Takes a transaction object, the changeset revision number it will
1127 Takes a transaction object, the changeset revision number it will
1136 be associated with, its parent nodes, and lists of added and
1128 be associated with, its parent nodes, and lists of added and
1137 removed paths.
1129 removed paths.
1138
1130
1139 If match is provided, storage can choose not to inspect or write out
1131 If match is provided, storage can choose not to inspect or write out
1140 items that do not match. Storage is still required to be able to provide
1132 items that do not match. Storage is still required to be able to provide
1141 the full manifest in the future for any directories written (these
1133 the full manifest in the future for any directories written (these
1142 manifests should not be "narrowed on disk").
1134 manifests should not be "narrowed on disk").
1143
1135
1144 Returns the binary node of the created revision.
1136 Returns the binary node of the created revision.
1145 """
1137 """
1146
1138
1147
1139
1148 class imanifeststorage(interfaceutil.Interface):
1140 class imanifeststorage(interfaceutil.Interface):
1149 """Storage interface for manifest data."""
1141 """Storage interface for manifest data."""
1150
1142
1151 tree = interfaceutil.Attribute(
1143 tree = interfaceutil.Attribute(
1152 """The path to the directory this manifest tracks.
1144 """The path to the directory this manifest tracks.
1153
1145
1154 The empty bytestring represents the root manifest.
1146 The empty bytestring represents the root manifest.
1155 """
1147 """
1156 )
1148 )
1157
1149
1158 index = interfaceutil.Attribute(
1150 index = interfaceutil.Attribute(
1159 """An ``ifilerevisionssequence`` instance."""
1151 """An ``ifilerevisionssequence`` instance."""
1160 )
1152 )
1161
1153
1162 indexfile = interfaceutil.Attribute(
1154 indexfile = interfaceutil.Attribute(
1163 """Path of revlog index file.
1155 """Path of revlog index file.
1164
1156
1165 TODO this is revlog specific and should not be exposed.
1157 TODO this is revlog specific and should not be exposed.
1166 """
1158 """
1167 )
1159 )
1168
1160
1169 opener = interfaceutil.Attribute(
1161 opener = interfaceutil.Attribute(
1170 """VFS opener to use to access underlying files used for storage.
1162 """VFS opener to use to access underlying files used for storage.
1171
1163
1172 TODO this is revlog specific and should not be exposed.
1164 TODO this is revlog specific and should not be exposed.
1173 """
1165 """
1174 )
1166 )
1175
1167
1176 version = interfaceutil.Attribute(
1168 version = interfaceutil.Attribute(
1177 """Revlog version number.
1169 """Revlog version number.
1178
1170
1179 TODO this is revlog specific and should not be exposed.
1171 TODO this is revlog specific and should not be exposed.
1180 """
1172 """
1181 )
1173 )
1182
1174
1183 _generaldelta = interfaceutil.Attribute(
1175 _generaldelta = interfaceutil.Attribute(
1184 """Whether generaldelta storage is being used.
1176 """Whether generaldelta storage is being used.
1185
1177
1186 TODO this is revlog specific and should not be exposed.
1178 TODO this is revlog specific and should not be exposed.
1187 """
1179 """
1188 )
1180 )
1189
1181
1190 fulltextcache = interfaceutil.Attribute(
1182 fulltextcache = interfaceutil.Attribute(
1191 """Dict with cache of fulltexts.
1183 """Dict with cache of fulltexts.
1192
1184
1193 TODO this doesn't feel appropriate for the storage interface.
1185 TODO this doesn't feel appropriate for the storage interface.
1194 """
1186 """
1195 )
1187 )
1196
1188
1197 def __len__():
1189 def __len__():
1198 """Obtain the number of revisions stored for this manifest."""
1190 """Obtain the number of revisions stored for this manifest."""
1199
1191
1200 def __iter__():
1192 def __iter__():
1201 """Iterate over revision numbers for this manifest."""
1193 """Iterate over revision numbers for this manifest."""
1202
1194
1203 def rev(node):
1195 def rev(node):
1204 """Obtain the revision number given a binary node.
1196 """Obtain the revision number given a binary node.
1205
1197
1206 Raises ``error.LookupError`` if the node is not known.
1198 Raises ``error.LookupError`` if the node is not known.
1207 """
1199 """
1208
1200
1209 def node(rev):
1201 def node(rev):
1210 """Obtain the node value given a revision number.
1202 """Obtain the node value given a revision number.
1211
1203
1212 Raises ``error.LookupError`` if the revision is not known.
1204 Raises ``error.LookupError`` if the revision is not known.
1213 """
1205 """
1214
1206
1215 def lookup(value):
1207 def lookup(value):
1216 """Attempt to resolve a value to a node.
1208 """Attempt to resolve a value to a node.
1217
1209
1218 Value can be a binary node, hex node, revision number, or a bytes
1210 Value can be a binary node, hex node, revision number, or a bytes
1219 that can be converted to an integer.
1211 that can be converted to an integer.
1220
1212
1221 Raises ``error.LookupError`` if a ndoe could not be resolved.
1213 Raises ``error.LookupError`` if a ndoe could not be resolved.
1222 """
1214 """
1223
1215
1224 def parents(node):
1216 def parents(node):
1225 """Returns a 2-tuple of parent nodes for a node.
1217 """Returns a 2-tuple of parent nodes for a node.
1226
1218
1227 Values will be ``nullid`` if the parent is empty.
1219 Values will be ``nullid`` if the parent is empty.
1228 """
1220 """
1229
1221
1230 def parentrevs(rev):
1222 def parentrevs(rev):
1231 """Like parents() but operates on revision numbers."""
1223 """Like parents() but operates on revision numbers."""
1232
1224
1233 def linkrev(rev):
1225 def linkrev(rev):
1234 """Obtain the changeset revision number a revision is linked to."""
1226 """Obtain the changeset revision number a revision is linked to."""
1235
1227
1236 def revision(node, _df=None, raw=False):
1228 def revision(node, _df=None, raw=False):
1237 """Obtain fulltext data for a node."""
1229 """Obtain fulltext data for a node."""
1238
1230
1239 def rawdata(node, _df=None):
1231 def rawdata(node, _df=None):
1240 """Obtain raw data for a node."""
1232 """Obtain raw data for a node."""
1241
1233
1242 def revdiff(rev1, rev2):
1234 def revdiff(rev1, rev2):
1243 """Obtain a delta between two revision numbers.
1235 """Obtain a delta between two revision numbers.
1244
1236
1245 The returned data is the result of ``bdiff.bdiff()`` on the raw
1237 The returned data is the result of ``bdiff.bdiff()`` on the raw
1246 revision data.
1238 revision data.
1247 """
1239 """
1248
1240
1249 def cmp(node, fulltext):
1241 def cmp(node, fulltext):
1250 """Compare fulltext to another revision.
1242 """Compare fulltext to another revision.
1251
1243
1252 Returns True if the fulltext is different from what is stored.
1244 Returns True if the fulltext is different from what is stored.
1253 """
1245 """
1254
1246
1255 def emitrevisions(
1247 def emitrevisions(
1256 nodes,
1248 nodes,
1257 nodesorder=None,
1249 nodesorder=None,
1258 revisiondata=False,
1250 revisiondata=False,
1259 assumehaveparentrevisions=False,
1251 assumehaveparentrevisions=False,
1260 ):
1252 ):
1261 """Produce ``irevisiondelta`` describing revisions.
1253 """Produce ``irevisiondelta`` describing revisions.
1262
1254
1263 See the documentation for ``ifiledata`` for more.
1255 See the documentation for ``ifiledata`` for more.
1264 """
1256 """
1265
1257
1266 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
1258 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
1267 """Process a series of deltas for storage.
1259 """Process a series of deltas for storage.
1268
1260
1269 See the documentation in ``ifilemutation`` for more.
1261 See the documentation in ``ifilemutation`` for more.
1270 """
1262 """
1271
1263
1272 def rawsize(rev):
1264 def rawsize(rev):
1273 """Obtain the size of tracked data.
1265 """Obtain the size of tracked data.
1274
1266
1275 Is equivalent to ``len(m.rawdata(node))``.
1267 Is equivalent to ``len(m.rawdata(node))``.
1276
1268
1277 TODO this method is only used by upgrade code and may be removed.
1269 TODO this method is only used by upgrade code and may be removed.
1278 """
1270 """
1279
1271
1280 def getstrippoint(minlink):
1272 def getstrippoint(minlink):
1281 """Find minimum revision that must be stripped to strip a linkrev.
1273 """Find minimum revision that must be stripped to strip a linkrev.
1282
1274
1283 See the documentation in ``ifilemutation`` for more.
1275 See the documentation in ``ifilemutation`` for more.
1284 """
1276 """
1285
1277
1286 def strip(minlink, transaction):
1278 def strip(minlink, transaction):
1287 """Remove storage of items starting at a linkrev.
1279 """Remove storage of items starting at a linkrev.
1288
1280
1289 See the documentation in ``ifilemutation`` for more.
1281 See the documentation in ``ifilemutation`` for more.
1290 """
1282 """
1291
1283
1292 def checksize():
1284 def checksize():
1293 """Obtain the expected sizes of backing files.
1285 """Obtain the expected sizes of backing files.
1294
1286
1295 TODO this is used by verify and it should not be part of the interface.
1287 TODO this is used by verify and it should not be part of the interface.
1296 """
1288 """
1297
1289
1298 def files():
1290 def files():
1299 """Obtain paths that are backing storage for this manifest.
1291 """Obtain paths that are backing storage for this manifest.
1300
1292
1301 TODO this is used by verify and there should probably be a better API
1293 TODO this is used by verify and there should probably be a better API
1302 for this functionality.
1294 for this functionality.
1303 """
1295 """
1304
1296
1305 def deltaparent(rev):
1297 def deltaparent(rev):
1306 """Obtain the revision that a revision is delta'd against.
1298 """Obtain the revision that a revision is delta'd against.
1307
1299
1308 TODO delta encoding is an implementation detail of storage and should
1300 TODO delta encoding is an implementation detail of storage and should
1309 not be exposed to the storage interface.
1301 not be exposed to the storage interface.
1310 """
1302 """
1311
1303
1312 def clone(tr, dest, **kwargs):
1304 def clone(tr, dest, **kwargs):
1313 """Clone this instance to another."""
1305 """Clone this instance to another."""
1314
1306
1315 def clearcaches(clear_persisted_data=False):
1307 def clearcaches(clear_persisted_data=False):
1316 """Clear any caches associated with this instance."""
1308 """Clear any caches associated with this instance."""
1317
1309
1318 def dirlog(d):
1310 def dirlog(d):
1319 """Obtain a manifest storage instance for a tree."""
1311 """Obtain a manifest storage instance for a tree."""
1320
1312
1321 def add(
1313 def add(
1322 m, transaction, link, p1, p2, added, removed, readtree=None, match=None
1314 m, transaction, link, p1, p2, added, removed, readtree=None, match=None
1323 ):
1315 ):
1324 """Add a revision to storage.
1316 """Add a revision to storage.
1325
1317
1326 ``m`` is an object conforming to ``imanifestdict``.
1318 ``m`` is an object conforming to ``imanifestdict``.
1327
1319
1328 ``link`` is the linkrev revision number.
1320 ``link`` is the linkrev revision number.
1329
1321
1330 ``p1`` and ``p2`` are the parent revision numbers.
1322 ``p1`` and ``p2`` are the parent revision numbers.
1331
1323
1332 ``added`` and ``removed`` are iterables of added and removed paths,
1324 ``added`` and ``removed`` are iterables of added and removed paths,
1333 respectively.
1325 respectively.
1334
1326
1335 ``readtree`` is a function that can be used to read the child tree(s)
1327 ``readtree`` is a function that can be used to read the child tree(s)
1336 when recursively writing the full tree structure when using
1328 when recursively writing the full tree structure when using
1337 treemanifets.
1329 treemanifets.
1338
1330
1339 ``match`` is a matcher that can be used to hint to storage that not all
1331 ``match`` is a matcher that can be used to hint to storage that not all
1340 paths must be inspected; this is an optimization and can be safely
1332 paths must be inspected; this is an optimization and can be safely
1341 ignored. Note that the storage must still be able to reproduce a full
1333 ignored. Note that the storage must still be able to reproduce a full
1342 manifest including files that did not match.
1334 manifest including files that did not match.
1343 """
1335 """
1344
1336
1345 def storageinfo(
1337 def storageinfo(
1346 exclusivefiles=False,
1338 exclusivefiles=False,
1347 sharedfiles=False,
1339 sharedfiles=False,
1348 revisionscount=False,
1340 revisionscount=False,
1349 trackedsize=False,
1341 trackedsize=False,
1350 storedsize=False,
1342 storedsize=False,
1351 ):
1343 ):
1352 """Obtain information about storage for this manifest's data.
1344 """Obtain information about storage for this manifest's data.
1353
1345
1354 See ``ifilestorage.storageinfo()`` for a description of this method.
1346 See ``ifilestorage.storageinfo()`` for a description of this method.
1355 This one behaves the same way, except for manifest data.
1347 This one behaves the same way, except for manifest data.
1356 """
1348 """
1357
1349
1358
1350
1359 class imanifestlog(interfaceutil.Interface):
1351 class imanifestlog(interfaceutil.Interface):
1360 """Interface representing a collection of manifest snapshots.
1352 """Interface representing a collection of manifest snapshots.
1361
1353
1362 Represents the root manifest in a repository.
1354 Represents the root manifest in a repository.
1363
1355
1364 Also serves as a means to access nested tree manifests and to cache
1356 Also serves as a means to access nested tree manifests and to cache
1365 tree manifests.
1357 tree manifests.
1366 """
1358 """
1367
1359
1368 def __getitem__(node):
1360 def __getitem__(node):
1369 """Obtain a manifest instance for a given binary node.
1361 """Obtain a manifest instance for a given binary node.
1370
1362
1371 Equivalent to calling ``self.get('', node)``.
1363 Equivalent to calling ``self.get('', node)``.
1372
1364
1373 The returned object conforms to the ``imanifestrevisionstored``
1365 The returned object conforms to the ``imanifestrevisionstored``
1374 interface.
1366 interface.
1375 """
1367 """
1376
1368
1377 def get(tree, node, verify=True):
1369 def get(tree, node, verify=True):
1378 """Retrieve the manifest instance for a given directory and binary node.
1370 """Retrieve the manifest instance for a given directory and binary node.
1379
1371
1380 ``node`` always refers to the node of the root manifest (which will be
1372 ``node`` always refers to the node of the root manifest (which will be
1381 the only manifest if flat manifests are being used).
1373 the only manifest if flat manifests are being used).
1382
1374
1383 If ``tree`` is the empty string, the root manifest is returned.
1375 If ``tree`` is the empty string, the root manifest is returned.
1384 Otherwise the manifest for the specified directory will be returned
1376 Otherwise the manifest for the specified directory will be returned
1385 (requires tree manifests).
1377 (requires tree manifests).
1386
1378
1387 If ``verify`` is True, ``LookupError`` is raised if the node is not
1379 If ``verify`` is True, ``LookupError`` is raised if the node is not
1388 known.
1380 known.
1389
1381
1390 The returned object conforms to the ``imanifestrevisionstored``
1382 The returned object conforms to the ``imanifestrevisionstored``
1391 interface.
1383 interface.
1392 """
1384 """
1393
1385
1394 def getstorage(tree):
1386 def getstorage(tree):
1395 """Retrieve an interface to storage for a particular tree.
1387 """Retrieve an interface to storage for a particular tree.
1396
1388
1397 If ``tree`` is the empty bytestring, storage for the root manifest will
1389 If ``tree`` is the empty bytestring, storage for the root manifest will
1398 be returned. Otherwise storage for a tree manifest is returned.
1390 be returned. Otherwise storage for a tree manifest is returned.
1399
1391
1400 TODO formalize interface for returned object.
1392 TODO formalize interface for returned object.
1401 """
1393 """
1402
1394
1403 def clearcaches():
1395 def clearcaches():
1404 """Clear caches associated with this collection."""
1396 """Clear caches associated with this collection."""
1405
1397
1406 def rev(node):
1398 def rev(node):
1407 """Obtain the revision number for a binary node.
1399 """Obtain the revision number for a binary node.
1408
1400
1409 Raises ``error.LookupError`` if the node is not known.
1401 Raises ``error.LookupError`` if the node is not known.
1410 """
1402 """
1411
1403
1412
1404
1413 class ilocalrepositoryfilestorage(interfaceutil.Interface):
1405 class ilocalrepositoryfilestorage(interfaceutil.Interface):
1414 """Local repository sub-interface providing access to tracked file storage.
1406 """Local repository sub-interface providing access to tracked file storage.
1415
1407
1416 This interface defines how a repository accesses storage for a single
1408 This interface defines how a repository accesses storage for a single
1417 tracked file path.
1409 tracked file path.
1418 """
1410 """
1419
1411
1420 def file(f):
1412 def file(f):
1421 """Obtain a filelog for a tracked path.
1413 """Obtain a filelog for a tracked path.
1422
1414
1423 The returned type conforms to the ``ifilestorage`` interface.
1415 The returned type conforms to the ``ifilestorage`` interface.
1424 """
1416 """
1425
1417
1426
1418
1427 class ilocalrepositorymain(interfaceutil.Interface):
1419 class ilocalrepositorymain(interfaceutil.Interface):
1428 """Main interface for local repositories.
1420 """Main interface for local repositories.
1429
1421
1430 This currently captures the reality of things - not how things should be.
1422 This currently captures the reality of things - not how things should be.
1431 """
1423 """
1432
1424
1433 supportedformats = interfaceutil.Attribute(
1425 supportedformats = interfaceutil.Attribute(
1434 """Set of requirements that apply to stream clone.
1426 """Set of requirements that apply to stream clone.
1435
1427
1436 This is actually a class attribute and is shared among all instances.
1428 This is actually a class attribute and is shared among all instances.
1437 """
1429 """
1438 )
1430 )
1439
1431
1440 supported = interfaceutil.Attribute(
1432 supported = interfaceutil.Attribute(
1441 """Set of requirements that this repo is capable of opening."""
1433 """Set of requirements that this repo is capable of opening."""
1442 )
1434 )
1443
1435
1444 requirements = interfaceutil.Attribute(
1436 requirements = interfaceutil.Attribute(
1445 """Set of requirements this repo uses."""
1437 """Set of requirements this repo uses."""
1446 )
1438 )
1447
1439
1448 features = interfaceutil.Attribute(
1440 features = interfaceutil.Attribute(
1449 """Set of "features" this repository supports.
1441 """Set of "features" this repository supports.
1450
1442
1451 A "feature" is a loosely-defined term. It can refer to a feature
1443 A "feature" is a loosely-defined term. It can refer to a feature
1452 in the classical sense or can describe an implementation detail
1444 in the classical sense or can describe an implementation detail
1453 of the repository. For example, a ``readonly`` feature may denote
1445 of the repository. For example, a ``readonly`` feature may denote
1454 the repository as read-only. Or a ``revlogfilestore`` feature may
1446 the repository as read-only. Or a ``revlogfilestore`` feature may
1455 denote that the repository is using revlogs for file storage.
1447 denote that the repository is using revlogs for file storage.
1456
1448
1457 The intent of features is to provide a machine-queryable mechanism
1449 The intent of features is to provide a machine-queryable mechanism
1458 for repo consumers to test for various repository characteristics.
1450 for repo consumers to test for various repository characteristics.
1459
1451
1460 Features are similar to ``requirements``. The main difference is that
1452 Features are similar to ``requirements``. The main difference is that
1461 requirements are stored on-disk and represent requirements to open the
1453 requirements are stored on-disk and represent requirements to open the
1462 repository. Features are more run-time capabilities of the repository
1454 repository. Features are more run-time capabilities of the repository
1463 and more granular capabilities (which may be derived from requirements).
1455 and more granular capabilities (which may be derived from requirements).
1464 """
1456 """
1465 )
1457 )
1466
1458
1467 filtername = interfaceutil.Attribute(
1459 filtername = interfaceutil.Attribute(
1468 """Name of the repoview that is active on this repo."""
1460 """Name of the repoview that is active on this repo."""
1469 )
1461 )
1470
1462
1471 wvfs = interfaceutil.Attribute(
1463 wvfs = interfaceutil.Attribute(
1472 """VFS used to access the working directory."""
1464 """VFS used to access the working directory."""
1473 )
1465 )
1474
1466
1475 vfs = interfaceutil.Attribute(
1467 vfs = interfaceutil.Attribute(
1476 """VFS rooted at the .hg directory.
1468 """VFS rooted at the .hg directory.
1477
1469
1478 Used to access repository data not in the store.
1470 Used to access repository data not in the store.
1479 """
1471 """
1480 )
1472 )
1481
1473
1482 svfs = interfaceutil.Attribute(
1474 svfs = interfaceutil.Attribute(
1483 """VFS rooted at the store.
1475 """VFS rooted at the store.
1484
1476
1485 Used to access repository data in the store. Typically .hg/store.
1477 Used to access repository data in the store. Typically .hg/store.
1486 But can point elsewhere if the store is shared.
1478 But can point elsewhere if the store is shared.
1487 """
1479 """
1488 )
1480 )
1489
1481
1490 root = interfaceutil.Attribute(
1482 root = interfaceutil.Attribute(
1491 """Path to the root of the working directory."""
1483 """Path to the root of the working directory."""
1492 )
1484 )
1493
1485
1494 path = interfaceutil.Attribute("""Path to the .hg directory.""")
1486 path = interfaceutil.Attribute("""Path to the .hg directory.""")
1495
1487
1496 origroot = interfaceutil.Attribute(
1488 origroot = interfaceutil.Attribute(
1497 """The filesystem path that was used to construct the repo."""
1489 """The filesystem path that was used to construct the repo."""
1498 )
1490 )
1499
1491
1500 auditor = interfaceutil.Attribute(
1492 auditor = interfaceutil.Attribute(
1501 """A pathauditor for the working directory.
1493 """A pathauditor for the working directory.
1502
1494
1503 This checks if a path refers to a nested repository.
1495 This checks if a path refers to a nested repository.
1504
1496
1505 Operates on the filesystem.
1497 Operates on the filesystem.
1506 """
1498 """
1507 )
1499 )
1508
1500
1509 nofsauditor = interfaceutil.Attribute(
1501 nofsauditor = interfaceutil.Attribute(
1510 """A pathauditor for the working directory.
1502 """A pathauditor for the working directory.
1511
1503
1512 This is like ``auditor`` except it doesn't do filesystem checks.
1504 This is like ``auditor`` except it doesn't do filesystem checks.
1513 """
1505 """
1514 )
1506 )
1515
1507
1516 baseui = interfaceutil.Attribute(
1508 baseui = interfaceutil.Attribute(
1517 """Original ui instance passed into constructor."""
1509 """Original ui instance passed into constructor."""
1518 )
1510 )
1519
1511
1520 ui = interfaceutil.Attribute("""Main ui instance for this instance.""")
1512 ui = interfaceutil.Attribute("""Main ui instance for this instance.""")
1521
1513
1522 sharedpath = interfaceutil.Attribute(
1514 sharedpath = interfaceutil.Attribute(
1523 """Path to the .hg directory of the repo this repo was shared from."""
1515 """Path to the .hg directory of the repo this repo was shared from."""
1524 )
1516 )
1525
1517
1526 store = interfaceutil.Attribute("""A store instance.""")
1518 store = interfaceutil.Attribute("""A store instance.""")
1527
1519
1528 spath = interfaceutil.Attribute("""Path to the store.""")
1520 spath = interfaceutil.Attribute("""Path to the store.""")
1529
1521
1530 sjoin = interfaceutil.Attribute("""Alias to self.store.join.""")
1522 sjoin = interfaceutil.Attribute("""Alias to self.store.join.""")
1531
1523
1532 cachevfs = interfaceutil.Attribute(
1524 cachevfs = interfaceutil.Attribute(
1533 """A VFS used to access the cache directory.
1525 """A VFS used to access the cache directory.
1534
1526
1535 Typically .hg/cache.
1527 Typically .hg/cache.
1536 """
1528 """
1537 )
1529 )
1538
1530
1539 wcachevfs = interfaceutil.Attribute(
1531 wcachevfs = interfaceutil.Attribute(
1540 """A VFS used to access the cache directory dedicated to working copy
1532 """A VFS used to access the cache directory dedicated to working copy
1541
1533
1542 Typically .hg/wcache.
1534 Typically .hg/wcache.
1543 """
1535 """
1544 )
1536 )
1545
1537
1546 filteredrevcache = interfaceutil.Attribute(
1538 filteredrevcache = interfaceutil.Attribute(
1547 """Holds sets of revisions to be filtered."""
1539 """Holds sets of revisions to be filtered."""
1548 )
1540 )
1549
1541
1550 names = interfaceutil.Attribute("""A ``namespaces`` instance.""")
1542 names = interfaceutil.Attribute("""A ``namespaces`` instance.""")
1551
1543
1552 filecopiesmode = interfaceutil.Attribute(
1544 filecopiesmode = interfaceutil.Attribute(
1553 """The way files copies should be dealt with in this repo."""
1545 """The way files copies should be dealt with in this repo."""
1554 )
1546 )
1555
1547
1556 def close():
1548 def close():
1557 """Close the handle on this repository."""
1549 """Close the handle on this repository."""
1558
1550
1559 def peer():
1551 def peer():
1560 """Obtain an object conforming to the ``peer`` interface."""
1552 """Obtain an object conforming to the ``peer`` interface."""
1561
1553
1562 def unfiltered():
1554 def unfiltered():
1563 """Obtain an unfiltered/raw view of this repo."""
1555 """Obtain an unfiltered/raw view of this repo."""
1564
1556
1565 def filtered(name, visibilityexceptions=None):
1557 def filtered(name, visibilityexceptions=None):
1566 """Obtain a named view of this repository."""
1558 """Obtain a named view of this repository."""
1567
1559
1568 obsstore = interfaceutil.Attribute("""A store of obsolescence data.""")
1560 obsstore = interfaceutil.Attribute("""A store of obsolescence data.""")
1569
1561
1570 changelog = interfaceutil.Attribute("""A handle on the changelog revlog.""")
1562 changelog = interfaceutil.Attribute("""A handle on the changelog revlog.""")
1571
1563
1572 manifestlog = interfaceutil.Attribute(
1564 manifestlog = interfaceutil.Attribute(
1573 """An instance conforming to the ``imanifestlog`` interface.
1565 """An instance conforming to the ``imanifestlog`` interface.
1574
1566
1575 Provides access to manifests for the repository.
1567 Provides access to manifests for the repository.
1576 """
1568 """
1577 )
1569 )
1578
1570
1579 dirstate = interfaceutil.Attribute("""Working directory state.""")
1571 dirstate = interfaceutil.Attribute("""Working directory state.""")
1580
1572
1581 narrowpats = interfaceutil.Attribute(
1573 narrowpats = interfaceutil.Attribute(
1582 """Matcher patterns for this repository's narrowspec."""
1574 """Matcher patterns for this repository's narrowspec."""
1583 )
1575 )
1584
1576
1585 def narrowmatch(match=None, includeexact=False):
1577 def narrowmatch(match=None, includeexact=False):
1586 """Obtain a matcher for the narrowspec."""
1578 """Obtain a matcher for the narrowspec."""
1587
1579
1588 def setnarrowpats(newincludes, newexcludes):
1580 def setnarrowpats(newincludes, newexcludes):
1589 """Define the narrowspec for this repository."""
1581 """Define the narrowspec for this repository."""
1590
1582
1591 def __getitem__(changeid):
1583 def __getitem__(changeid):
1592 """Try to resolve a changectx."""
1584 """Try to resolve a changectx."""
1593
1585
1594 def __contains__(changeid):
1586 def __contains__(changeid):
1595 """Whether a changeset exists."""
1587 """Whether a changeset exists."""
1596
1588
1597 def __nonzero__():
1589 def __nonzero__():
1598 """Always returns True."""
1590 """Always returns True."""
1599 return True
1591 return True
1600
1592
1601 __bool__ = __nonzero__
1593 __bool__ = __nonzero__
1602
1594
1603 def __len__():
1595 def __len__():
1604 """Returns the number of changesets in the repo."""
1596 """Returns the number of changesets in the repo."""
1605
1597
1606 def __iter__():
1598 def __iter__():
1607 """Iterate over revisions in the changelog."""
1599 """Iterate over revisions in the changelog."""
1608
1600
1609 def revs(expr, *args):
1601 def revs(expr, *args):
1610 """Evaluate a revset.
1602 """Evaluate a revset.
1611
1603
1612 Emits revisions.
1604 Emits revisions.
1613 """
1605 """
1614
1606
1615 def set(expr, *args):
1607 def set(expr, *args):
1616 """Evaluate a revset.
1608 """Evaluate a revset.
1617
1609
1618 Emits changectx instances.
1610 Emits changectx instances.
1619 """
1611 """
1620
1612
1621 def anyrevs(specs, user=False, localalias=None):
1613 def anyrevs(specs, user=False, localalias=None):
1622 """Find revisions matching one of the given revsets."""
1614 """Find revisions matching one of the given revsets."""
1623
1615
1624 def url():
1616 def url():
1625 """Returns a string representing the location of this repo."""
1617 """Returns a string representing the location of this repo."""
1626
1618
1627 def hook(name, throw=False, **args):
1619 def hook(name, throw=False, **args):
1628 """Call a hook."""
1620 """Call a hook."""
1629
1621
1630 def tags():
1622 def tags():
1631 """Return a mapping of tag to node."""
1623 """Return a mapping of tag to node."""
1632
1624
1633 def tagtype(tagname):
1625 def tagtype(tagname):
1634 """Return the type of a given tag."""
1626 """Return the type of a given tag."""
1635
1627
1636 def tagslist():
1628 def tagslist():
1637 """Return a list of tags ordered by revision."""
1629 """Return a list of tags ordered by revision."""
1638
1630
1639 def nodetags(node):
1631 def nodetags(node):
1640 """Return the tags associated with a node."""
1632 """Return the tags associated with a node."""
1641
1633
1642 def nodebookmarks(node):
1634 def nodebookmarks(node):
1643 """Return the list of bookmarks pointing to the specified node."""
1635 """Return the list of bookmarks pointing to the specified node."""
1644
1636
1645 def branchmap():
1637 def branchmap():
1646 """Return a mapping of branch to heads in that branch."""
1638 """Return a mapping of branch to heads in that branch."""
1647
1639
1648 def revbranchcache():
1640 def revbranchcache():
1649 pass
1641 pass
1650
1642
1651 def branchtip(branchtip, ignoremissing=False):
1643 def branchtip(branchtip, ignoremissing=False):
1652 """Return the tip node for a given branch."""
1644 """Return the tip node for a given branch."""
1653
1645
1654 def lookup(key):
1646 def lookup(key):
1655 """Resolve the node for a revision."""
1647 """Resolve the node for a revision."""
1656
1648
1657 def lookupbranch(key):
1649 def lookupbranch(key):
1658 """Look up the branch name of the given revision or branch name."""
1650 """Look up the branch name of the given revision or branch name."""
1659
1651
1660 def known(nodes):
1652 def known(nodes):
1661 """Determine whether a series of nodes is known.
1653 """Determine whether a series of nodes is known.
1662
1654
1663 Returns a list of bools.
1655 Returns a list of bools.
1664 """
1656 """
1665
1657
1666 def local():
1658 def local():
1667 """Whether the repository is local."""
1659 """Whether the repository is local."""
1668 return True
1660 return True
1669
1661
1670 def publishing():
1662 def publishing():
1671 """Whether the repository is a publishing repository."""
1663 """Whether the repository is a publishing repository."""
1672
1664
1673 def cancopy():
1665 def cancopy():
1674 pass
1666 pass
1675
1667
1676 def shared():
1668 def shared():
1677 """The type of shared repository or None."""
1669 """The type of shared repository or None."""
1678
1670
1679 def wjoin(f, *insidef):
1671 def wjoin(f, *insidef):
1680 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
1672 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
1681
1673
1682 def setparents(p1, p2):
1674 def setparents(p1, p2):
1683 """Set the parent nodes of the working directory."""
1675 """Set the parent nodes of the working directory."""
1684
1676
1685 def filectx(path, changeid=None, fileid=None):
1677 def filectx(path, changeid=None, fileid=None):
1686 """Obtain a filectx for the given file revision."""
1678 """Obtain a filectx for the given file revision."""
1687
1679
1688 def getcwd():
1680 def getcwd():
1689 """Obtain the current working directory from the dirstate."""
1681 """Obtain the current working directory from the dirstate."""
1690
1682
1691 def pathto(f, cwd=None):
1683 def pathto(f, cwd=None):
1692 """Obtain the relative path to a file."""
1684 """Obtain the relative path to a file."""
1693
1685
1694 def adddatafilter(name, fltr):
1686 def adddatafilter(name, fltr):
1695 pass
1687 pass
1696
1688
1697 def wread(filename):
1689 def wread(filename):
1698 """Read a file from wvfs, using data filters."""
1690 """Read a file from wvfs, using data filters."""
1699
1691
1700 def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
1692 def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
1701 """Write data to a file in the wvfs, using data filters."""
1693 """Write data to a file in the wvfs, using data filters."""
1702
1694
1703 def wwritedata(filename, data):
1695 def wwritedata(filename, data):
1704 """Resolve data for writing to the wvfs, using data filters."""
1696 """Resolve data for writing to the wvfs, using data filters."""
1705
1697
1706 def currenttransaction():
1698 def currenttransaction():
1707 """Obtain the current transaction instance or None."""
1699 """Obtain the current transaction instance or None."""
1708
1700
1709 def transaction(desc, report=None):
1701 def transaction(desc, report=None):
1710 """Open a new transaction to write to the repository."""
1702 """Open a new transaction to write to the repository."""
1711
1703
1712 def undofiles():
1704 def undofiles():
1713 """Returns a list of (vfs, path) for files to undo transactions."""
1705 """Returns a list of (vfs, path) for files to undo transactions."""
1714
1706
1715 def recover():
1707 def recover():
1716 """Roll back an interrupted transaction."""
1708 """Roll back an interrupted transaction."""
1717
1709
1718 def rollback(dryrun=False, force=False):
1710 def rollback(dryrun=False, force=False):
1719 """Undo the last transaction.
1711 """Undo the last transaction.
1720
1712
1721 DANGEROUS.
1713 DANGEROUS.
1722 """
1714 """
1723
1715
1724 def updatecaches(tr=None, full=False):
1716 def updatecaches(tr=None, full=False):
1725 """Warm repo caches."""
1717 """Warm repo caches."""
1726
1718
1727 def invalidatecaches():
1719 def invalidatecaches():
1728 """Invalidate cached data due to the repository mutating."""
1720 """Invalidate cached data due to the repository mutating."""
1729
1721
1730 def invalidatevolatilesets():
1722 def invalidatevolatilesets():
1731 pass
1723 pass
1732
1724
1733 def invalidatedirstate():
1725 def invalidatedirstate():
1734 """Invalidate the dirstate."""
1726 """Invalidate the dirstate."""
1735
1727
1736 def invalidate(clearfilecache=False):
1728 def invalidate(clearfilecache=False):
1737 pass
1729 pass
1738
1730
1739 def invalidateall():
1731 def invalidateall():
1740 pass
1732 pass
1741
1733
1742 def lock(wait=True):
1734 def lock(wait=True):
1743 """Lock the repository store and return a lock instance."""
1735 """Lock the repository store and return a lock instance."""
1744
1736
1745 def wlock(wait=True):
1737 def wlock(wait=True):
1746 """Lock the non-store parts of the repository."""
1738 """Lock the non-store parts of the repository."""
1747
1739
1748 def currentwlock():
1740 def currentwlock():
1749 """Return the wlock if it's held or None."""
1741 """Return the wlock if it's held or None."""
1750
1742
1751 def checkcommitpatterns(wctx, match, status, fail):
1743 def checkcommitpatterns(wctx, match, status, fail):
1752 pass
1744 pass
1753
1745
1754 def commit(
1746 def commit(
1755 text=b'',
1747 text=b'',
1756 user=None,
1748 user=None,
1757 date=None,
1749 date=None,
1758 match=None,
1750 match=None,
1759 force=False,
1751 force=False,
1760 editor=False,
1752 editor=False,
1761 extra=None,
1753 extra=None,
1762 ):
1754 ):
1763 """Add a new revision to the repository."""
1755 """Add a new revision to the repository."""
1764
1756
1765 def commitctx(ctx, error=False, origctx=None):
1757 def commitctx(ctx, error=False, origctx=None):
1766 """Commit a commitctx instance to the repository."""
1758 """Commit a commitctx instance to the repository."""
1767
1759
1768 def destroying():
1760 def destroying():
1769 """Inform the repository that nodes are about to be destroyed."""
1761 """Inform the repository that nodes are about to be destroyed."""
1770
1762
1771 def destroyed():
1763 def destroyed():
1772 """Inform the repository that nodes have been destroyed."""
1764 """Inform the repository that nodes have been destroyed."""
1773
1765
1774 def status(
1766 def status(
1775 node1=b'.',
1767 node1=b'.',
1776 node2=None,
1768 node2=None,
1777 match=None,
1769 match=None,
1778 ignored=False,
1770 ignored=False,
1779 clean=False,
1771 clean=False,
1780 unknown=False,
1772 unknown=False,
1781 listsubrepos=False,
1773 listsubrepos=False,
1782 ):
1774 ):
1783 """Convenience method to call repo[x].status()."""
1775 """Convenience method to call repo[x].status()."""
1784
1776
1785 def addpostdsstatus(ps):
1777 def addpostdsstatus(ps):
1786 pass
1778 pass
1787
1779
1788 def postdsstatus():
1780 def postdsstatus():
1789 pass
1781 pass
1790
1782
1791 def clearpostdsstatus():
1783 def clearpostdsstatus():
1792 pass
1784 pass
1793
1785
1794 def heads(start=None):
1786 def heads(start=None):
1795 """Obtain list of nodes that are DAG heads."""
1787 """Obtain list of nodes that are DAG heads."""
1796
1788
1797 def branchheads(branch=None, start=None, closed=False):
1789 def branchheads(branch=None, start=None, closed=False):
1798 pass
1790 pass
1799
1791
1800 def branches(nodes):
1792 def branches(nodes):
1801 pass
1793 pass
1802
1794
1803 def between(pairs):
1795 def between(pairs):
1804 pass
1796 pass
1805
1797
1806 def checkpush(pushop):
1798 def checkpush(pushop):
1807 pass
1799 pass
1808
1800
1809 prepushoutgoinghooks = interfaceutil.Attribute("""util.hooks instance.""")
1801 prepushoutgoinghooks = interfaceutil.Attribute("""util.hooks instance.""")
1810
1802
1811 def pushkey(namespace, key, old, new):
1803 def pushkey(namespace, key, old, new):
1812 pass
1804 pass
1813
1805
1814 def listkeys(namespace):
1806 def listkeys(namespace):
1815 pass
1807 pass
1816
1808
1817 def debugwireargs(one, two, three=None, four=None, five=None):
1809 def debugwireargs(one, two, three=None, four=None, five=None):
1818 pass
1810 pass
1819
1811
1820 def savecommitmessage(text):
1812 def savecommitmessage(text):
1821 pass
1813 pass
1822
1814
1823
1815
1824 class completelocalrepository(
1816 class completelocalrepository(
1825 ilocalrepositorymain, ilocalrepositoryfilestorage
1817 ilocalrepositorymain, ilocalrepositoryfilestorage
1826 ):
1818 ):
1827 """Complete interface for a local repository."""
1819 """Complete interface for a local repository."""
1828
1820
1829
1821
1830 class iwireprotocolcommandcacher(interfaceutil.Interface):
1822 class iwireprotocolcommandcacher(interfaceutil.Interface):
1831 """Represents a caching backend for wire protocol commands.
1823 """Represents a caching backend for wire protocol commands.
1832
1824
1833 Wire protocol version 2 supports transparent caching of many commands.
1825 Wire protocol version 2 supports transparent caching of many commands.
1834 To leverage this caching, servers can activate objects that cache
1826 To leverage this caching, servers can activate objects that cache
1835 command responses. Objects handle both cache writing and reading.
1827 command responses. Objects handle both cache writing and reading.
1836 This interface defines how that response caching mechanism works.
1828 This interface defines how that response caching mechanism works.
1837
1829
1838 Wire protocol version 2 commands emit a series of objects that are
1830 Wire protocol version 2 commands emit a series of objects that are
1839 serialized and sent to the client. The caching layer exists between
1831 serialized and sent to the client. The caching layer exists between
1840 the invocation of the command function and the sending of its output
1832 the invocation of the command function and the sending of its output
1841 objects to an output layer.
1833 objects to an output layer.
1842
1834
1843 Instances of this interface represent a binding to a cache that
1835 Instances of this interface represent a binding to a cache that
1844 can serve a response (in place of calling a command function) and/or
1836 can serve a response (in place of calling a command function) and/or
1845 write responses to a cache for subsequent use.
1837 write responses to a cache for subsequent use.
1846
1838
1847 When a command request arrives, the following happens with regards
1839 When a command request arrives, the following happens with regards
1848 to this interface:
1840 to this interface:
1849
1841
1850 1. The server determines whether the command request is cacheable.
1842 1. The server determines whether the command request is cacheable.
1851 2. If it is, an instance of this interface is spawned.
1843 2. If it is, an instance of this interface is spawned.
1852 3. The cacher is activated in a context manager (``__enter__`` is called).
1844 3. The cacher is activated in a context manager (``__enter__`` is called).
1853 4. A cache *key* for that request is derived. This will call the
1845 4. A cache *key* for that request is derived. This will call the
1854 instance's ``adjustcachekeystate()`` method so the derivation
1846 instance's ``adjustcachekeystate()`` method so the derivation
1855 can be influenced.
1847 can be influenced.
1856 5. The cacher is informed of the derived cache key via a call to
1848 5. The cacher is informed of the derived cache key via a call to
1857 ``setcachekey()``.
1849 ``setcachekey()``.
1858 6. The cacher's ``lookup()`` method is called to test for presence of
1850 6. The cacher's ``lookup()`` method is called to test for presence of
1859 the derived key in the cache.
1851 the derived key in the cache.
1860 7. If ``lookup()`` returns a hit, that cached result is used in place
1852 7. If ``lookup()`` returns a hit, that cached result is used in place
1861 of invoking the command function. ``__exit__`` is called and the instance
1853 of invoking the command function. ``__exit__`` is called and the instance
1862 is discarded.
1854 is discarded.
1863 8. The command function is invoked.
1855 8. The command function is invoked.
1864 9. ``onobject()`` is called for each object emitted by the command
1856 9. ``onobject()`` is called for each object emitted by the command
1865 function.
1857 function.
1866 10. After the final object is seen, ``onfinished()`` is called.
1858 10. After the final object is seen, ``onfinished()`` is called.
1867 11. ``__exit__`` is called to signal the end of use of the instance.
1859 11. ``__exit__`` is called to signal the end of use of the instance.
1868
1860
1869 Cache *key* derivation can be influenced by the instance.
1861 Cache *key* derivation can be influenced by the instance.
1870
1862
1871 Cache keys are initially derived by a deterministic representation of
1863 Cache keys are initially derived by a deterministic representation of
1872 the command request. This includes the command name, arguments, protocol
1864 the command request. This includes the command name, arguments, protocol
1873 version, etc. This initial key derivation is performed by CBOR-encoding a
1865 version, etc. This initial key derivation is performed by CBOR-encoding a
1874 data structure and feeding that output into a hasher.
1866 data structure and feeding that output into a hasher.
1875
1867
1876 Instances of this interface can influence this initial key derivation
1868 Instances of this interface can influence this initial key derivation
1877 via ``adjustcachekeystate()``.
1869 via ``adjustcachekeystate()``.
1878
1870
1879 The instance is informed of the derived cache key via a call to
1871 The instance is informed of the derived cache key via a call to
1880 ``setcachekey()``. The instance must store the key locally so it can
1872 ``setcachekey()``. The instance must store the key locally so it can
1881 be consulted on subsequent operations that may require it.
1873 be consulted on subsequent operations that may require it.
1882
1874
1883 When constructed, the instance has access to a callable that can be used
1875 When constructed, the instance has access to a callable that can be used
1884 for encoding response objects. This callable receives as its single
1876 for encoding response objects. This callable receives as its single
1885 argument an object emitted by a command function. It returns an iterable
1877 argument an object emitted by a command function. It returns an iterable
1886 of bytes chunks representing the encoded object. Unless the cacher is
1878 of bytes chunks representing the encoded object. Unless the cacher is
1887 caching native Python objects in memory or has a way of reconstructing
1879 caching native Python objects in memory or has a way of reconstructing
1888 the original Python objects, implementations typically call this function
1880 the original Python objects, implementations typically call this function
1889 to produce bytes from the output objects and then store those bytes in
1881 to produce bytes from the output objects and then store those bytes in
1890 the cache. When it comes time to re-emit those bytes, they are wrapped
1882 the cache. When it comes time to re-emit those bytes, they are wrapped
1891 in a ``wireprototypes.encodedresponse`` instance to tell the output
1883 in a ``wireprototypes.encodedresponse`` instance to tell the output
1892 layer that they are pre-encoded.
1884 layer that they are pre-encoded.
1893
1885
1894 When receiving the objects emitted by the command function, instances
1886 When receiving the objects emitted by the command function, instances
1895 can choose what to do with those objects. The simplest thing to do is
1887 can choose what to do with those objects. The simplest thing to do is
1896 re-emit the original objects. They will be forwarded to the output
1888 re-emit the original objects. They will be forwarded to the output
1897 layer and will be processed as if the cacher did not exist.
1889 layer and will be processed as if the cacher did not exist.
1898
1890
1899 Implementations could also choose to not emit objects - instead locally
1891 Implementations could also choose to not emit objects - instead locally
1900 buffering objects or their encoded representation. They could then emit
1892 buffering objects or their encoded representation. They could then emit
1901 a single "coalesced" object when ``onfinished()`` is called. In
1893 a single "coalesced" object when ``onfinished()`` is called. In
1902 this way, the implementation would function as a filtering layer of
1894 this way, the implementation would function as a filtering layer of
1903 sorts.
1895 sorts.
1904
1896
1905 When caching objects, typically the encoded form of the object will
1897 When caching objects, typically the encoded form of the object will
1906 be stored. Keep in mind that if the original object is forwarded to
1898 be stored. Keep in mind that if the original object is forwarded to
1907 the output layer, it will need to be encoded there as well. For large
1899 the output layer, it will need to be encoded there as well. For large
1908 output, this redundant encoding could add overhead. Implementations
1900 output, this redundant encoding could add overhead. Implementations
1909 could wrap the encoded object data in ``wireprototypes.encodedresponse``
1901 could wrap the encoded object data in ``wireprototypes.encodedresponse``
1910 instances to avoid this overhead.
1902 instances to avoid this overhead.
1911 """
1903 """
1912
1904
1913 def __enter__():
1905 def __enter__():
1914 """Marks the instance as active.
1906 """Marks the instance as active.
1915
1907
1916 Should return self.
1908 Should return self.
1917 """
1909 """
1918
1910
1919 def __exit__(exctype, excvalue, exctb):
1911 def __exit__(exctype, excvalue, exctb):
1920 """Called when cacher is no longer used.
1912 """Called when cacher is no longer used.
1921
1913
1922 This can be used by implementations to perform cleanup actions (e.g.
1914 This can be used by implementations to perform cleanup actions (e.g.
1923 disconnecting network sockets, aborting a partially cached response.
1915 disconnecting network sockets, aborting a partially cached response.
1924 """
1916 """
1925
1917
1926 def adjustcachekeystate(state):
1918 def adjustcachekeystate(state):
1927 """Influences cache key derivation by adjusting state to derive key.
1919 """Influences cache key derivation by adjusting state to derive key.
1928
1920
1929 A dict defining the state used to derive the cache key is passed.
1921 A dict defining the state used to derive the cache key is passed.
1930
1922
1931 Implementations can modify this dict to record additional state that
1923 Implementations can modify this dict to record additional state that
1932 is wanted to influence key derivation.
1924 is wanted to influence key derivation.
1933
1925
1934 Implementations are *highly* encouraged to not modify or delete
1926 Implementations are *highly* encouraged to not modify or delete
1935 existing keys.
1927 existing keys.
1936 """
1928 """
1937
1929
1938 def setcachekey(key):
1930 def setcachekey(key):
1939 """Record the derived cache key for this request.
1931 """Record the derived cache key for this request.
1940
1932
1941 Instances may mutate the key for internal usage, as desired. e.g.
1933 Instances may mutate the key for internal usage, as desired. e.g.
1942 instances may wish to prepend the repo name, introduce path
1934 instances may wish to prepend the repo name, introduce path
1943 components for filesystem or URL addressing, etc. Behavior is up to
1935 components for filesystem or URL addressing, etc. Behavior is up to
1944 the cache.
1936 the cache.
1945
1937
1946 Returns a bool indicating if the request is cacheable by this
1938 Returns a bool indicating if the request is cacheable by this
1947 instance.
1939 instance.
1948 """
1940 """
1949
1941
1950 def lookup():
1942 def lookup():
1951 """Attempt to resolve an entry in the cache.
1943 """Attempt to resolve an entry in the cache.
1952
1944
1953 The instance is instructed to look for the cache key that it was
1945 The instance is instructed to look for the cache key that it was
1954 informed about via the call to ``setcachekey()``.
1946 informed about via the call to ``setcachekey()``.
1955
1947
1956 If there's no cache hit or the cacher doesn't wish to use the cached
1948 If there's no cache hit or the cacher doesn't wish to use the cached
1957 entry, ``None`` should be returned.
1949 entry, ``None`` should be returned.
1958
1950
1959 Else, a dict defining the cached result should be returned. The
1951 Else, a dict defining the cached result should be returned. The
1960 dict may have the following keys:
1952 dict may have the following keys:
1961
1953
1962 objs
1954 objs
1963 An iterable of objects that should be sent to the client. That
1955 An iterable of objects that should be sent to the client. That
1964 iterable of objects is expected to be what the command function
1956 iterable of objects is expected to be what the command function
1965 would return if invoked or an equivalent representation thereof.
1957 would return if invoked or an equivalent representation thereof.
1966 """
1958 """
1967
1959
1968 def onobject(obj):
1960 def onobject(obj):
1969 """Called when a new object is emitted from the command function.
1961 """Called when a new object is emitted from the command function.
1970
1962
1971 Receives as its argument the object that was emitted from the
1963 Receives as its argument the object that was emitted from the
1972 command function.
1964 command function.
1973
1965
1974 This method returns an iterator of objects to forward to the output
1966 This method returns an iterator of objects to forward to the output
1975 layer. The easiest implementation is a generator that just
1967 layer. The easiest implementation is a generator that just
1976 ``yield obj``.
1968 ``yield obj``.
1977 """
1969 """
1978
1970
1979 def onfinished():
1971 def onfinished():
1980 """Called after all objects have been emitted from the command function.
1972 """Called after all objects have been emitted from the command function.
1981
1973
1982 Implementations should return an iterator of objects to forward to
1974 Implementations should return an iterator of objects to forward to
1983 the output layer.
1975 the output layer.
1984
1976
1985 This method can be a generator.
1977 This method can be a generator.
1986 """
1978 """
@@ -1,2273 +1,2261
1 # manifest.py - manifest revision class for mercurial
1 # manifest.py - manifest revision class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import heapq
10 import heapq
11 import itertools
11 import itertools
12 import struct
12 import struct
13 import weakref
13 import weakref
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 bin,
17 bin,
18 hex,
18 hex,
19 nullid,
19 nullid,
20 nullrev,
20 nullrev,
21 )
21 )
22 from .pycompat import getattr
22 from .pycompat import getattr
23 from . import (
23 from . import (
24 error,
24 error,
25 mdiff,
25 mdiff,
26 pathutil,
26 pathutil,
27 policy,
27 policy,
28 pycompat,
28 pycompat,
29 revlog,
29 revlog,
30 util,
30 util,
31 )
31 )
32 from .interfaces import (
32 from .interfaces import (
33 repository,
33 repository,
34 util as interfaceutil,
34 util as interfaceutil,
35 )
35 )
36
36
# parsers is resolved through the module policy — presumably the C
# implementation when available, the pure-Python fallback otherwise.
parsers = policy.importmod('parsers')
# local shorthand used by the classes below
propertycache = util.propertycache

# Allow tests to more easily test the alternate path in manifestdict.fastdelta()
FASTDELTA_TEXTDIFF_THRESHOLD = 1000
42
42
43
43
44 def _parse(data):
44 def _parse(data):
45 # This method does a little bit of excessive-looking
45 # This method does a little bit of excessive-looking
46 # precondition checking. This is so that the behavior of this
46 # precondition checking. This is so that the behavior of this
47 # class exactly matches its C counterpart to try and help
47 # class exactly matches its C counterpart to try and help
48 # prevent surprise breakage for anyone that develops against
48 # prevent surprise breakage for anyone that develops against
49 # the pure version.
49 # the pure version.
50 if data and data[-1:] != b'\n':
50 if data and data[-1:] != b'\n':
51 raise ValueError(b'Manifest did not end in a newline.')
51 raise ValueError(b'Manifest did not end in a newline.')
52 prev = None
52 prev = None
53 for l in data.splitlines():
53 for l in data.splitlines():
54 if prev is not None and prev > l:
54 if prev is not None and prev > l:
55 raise ValueError(b'Manifest lines not in sorted order.')
55 raise ValueError(b'Manifest lines not in sorted order.')
56 prev = l
56 prev = l
57 f, n = l.split(b'\0')
57 f, n = l.split(b'\0')
58 if len(n) > 40:
58 if len(n) > 40:
59 yield f, bin(n[:40]), n[40:]
59 yield f, bin(n[:40]), n[40:]
60 else:
60 else:
61 yield f, bin(n), b''
61 yield f, bin(n), b''
62
62
63
63
def _text(it):
    """Serialize an iterable of ``(path, node, flags)`` to manifest bytes.

    Collects all paths first so forbidden names are rejected (via
    ``_checkforbidden``) before any output is produced.
    """
    paths = []
    chunks = []
    for path, node, flags in it:
        paths.append(path)
        # if this is changed to support newlines in filenames,
        # be sure to check the templates/ dir again (especially *-raw.tmpl)
        chunks.append(b"%s\0%s%s\n" % (path, hex(node), flags))

    _checkforbidden(paths)
    return b''.join(chunks)
75
75
76
76
class lazymanifestiter(object):
    """Iterator over the file names of a ``_lazymanifest``.

    Walks ``lm.positions`` indices via ``lm._get``: compact-buffer entries
    come back as ``(data, offset)``; entries living in ``extradata`` come
    back as a ``(name, node, flags)`` tuple with offset -1.
    """

    def __init__(self, lm):
        self.pos = 0
        self.lm = lm

    def __iter__(self):
        return self

    def __next__(self):
        try:
            data, pos = self.lm._get(self.pos)
        except IndexError:
            raise StopIteration
        self.pos += 1
        if pos == -1:
            # extradata entry: the name is the first tuple element
            return data[0]
        nul = data.find(b'\x00', pos)
        return data[pos:nul]

    # Python 2 iterator protocol name
    next = __next__
98
98
99
99
class lazymanifestiterentries(object):
    """Iterator yielding ``(name, node, flags)`` triples from a
    ``_lazymanifest``.

    Compact-buffer entries are decoded on the fly (hex node via
    ``unhexlify``, flags via ``lm._getflags``); extradata entries are
    already stored as triples and are returned unchanged.
    """

    def __init__(self, lm):
        self.lm = lm
        self.pos = 0

    def __iter__(self):
        return self

    def __next__(self):
        try:
            data, pos = self.lm._get(self.pos)
        except IndexError:
            raise StopIteration
        if pos == -1:
            # extradata entry: already a (name, node, flags) tuple
            self.pos += 1
            return data
        nul = data.find(b'\x00', pos)
        # extrainfo/_getflags are indexed by the *current* position, so
        # read them before advancing
        node = unhexlify(data, self.lm.extrainfo[self.pos], nul + 1, 40)
        flags = self.lm._getflags(data, self.pos, nul)
        self.pos += 1
        return (data[pos:nul], node, flags)

    # Python 2 iterator protocol name
    next = __next__
123
123
124
124
def unhexlify(data, extra, pos, length):
    """Decode ``length`` hex digits of ``data`` starting at ``pos``.

    If ``extra`` is nonzero, its low byte is appended to the decoded
    value, reconstructing the extra hash byte that the compact encoding
    stores out of band (see ``_lazymanifest.extrainfo``).
    """
    s = bin(data[pos : pos + length])
    if extra:
        # chr() returns a unicode str on Python 3, which cannot be
        # concatenated to bytes; pack the low byte explicitly so this
        # works on both Python 2 and 3.
        s += struct.pack('>B', extra & 0xFF)
    return s
130
130
131
131
132 def _cmp(a, b):
132 def _cmp(a, b):
133 return (a > b) - (a < b)
133 return (a > b) - (a < b)
134
134
135
135
class _lazymanifest(object):
    """A pure python manifest backed by a byte string. It is supplimented with
    internal lists as it is modified, until it is compacted back to a pure byte
    string.

    ``data`` is the initial manifest data.

    ``positions`` is a list of offsets, one per manifest entry. Positive
    values are offsets into ``data``, negative values are offsets into the
    ``extradata`` list. When an entry is removed, its entry is dropped from
    ``positions``. The values are encoded such that when walking the list and
    indexing into ``data`` or ``extradata`` as appropriate, the entries are
    sorted by filename.

    ``extradata`` is a list of (key, hash, flags) for entries that were added or
    modified since the manifest was created or compacted.
    """

    def __init__(
        self,
        data,
        positions=None,
        extrainfo=None,
        extradata=None,
        hasremovals=False,
    ):
        # Two construction modes: fresh from raw manifest bytes, or (used
        # by copy()) adopting snapshots of another instance's bookkeeping.
        if positions is None:
            self.positions = self.findlines(data)
            self.extrainfo = [0] * len(self.positions)
            self.data = data
            self.extradata = []
            self.hasremovals = False
        else:
            self.positions = positions[:]
            self.extrainfo = extrainfo[:]
            self.extradata = extradata[:]
            self.data = data
            self.hasremovals = hasremovals

    def findlines(self, data):
        """Return the offset of each line start in ``data``, validating
        that the text ends in a newline and lines are sorted by filename."""
        if not data:
            return []
        pos = data.find(b"\n")
        if pos == -1 or data[-1:] != b'\n':
            raise ValueError(b"Manifest did not end in a newline.")
        positions = [0]
        prev = data[: data.find(b'\x00')]
        while pos < len(data) - 1 and pos != -1:
            positions.append(pos + 1)
            nexts = data[pos + 1 : data.find(b'\x00', pos + 1)]
            if nexts < prev:
                raise ValueError(b"Manifest lines not in sorted order.")
            prev = nexts
            pos = data.find(b"\n", pos + 1)
        return positions

    def _get(self, index):
        # get the position encoded in pos:
        # positive number is an index in 'data'
        # negative number is in extrapieces
        pos = self.positions[index]
        if pos >= 0:
            return self.data, pos
        return self.extradata[-pos - 1], -1

    def _getkey(self, pos):
        # ``pos`` uses the same encoding as ``positions`` entries.
        if pos >= 0:
            return self.data[pos : self.data.find(b'\x00', pos + 1)]
        return self.extradata[-pos - 1][0]

    def bsearch(self, key):
        """Binary-search ``key``; return its index in ``positions`` or -1."""
        first = 0
        last = len(self.positions) - 1

        while first <= last:
            midpoint = (first + last) // 2
            nextpos = self.positions[midpoint]
            candidate = self._getkey(nextpos)
            r = _cmp(key, candidate)
            if r == 0:
                return midpoint
            else:
                if r < 0:
                    last = midpoint - 1
                else:
                    first = midpoint + 1
        return -1

    def bsearch2(self, key):
        # same as the above, but will always return the position
        # done for performance reasons
        first = 0
        last = len(self.positions) - 1

        while first <= last:
            midpoint = (first + last) // 2
            nextpos = self.positions[midpoint]
            candidate = self._getkey(nextpos)
            r = _cmp(key, candidate)
            if r == 0:
                return (midpoint, True)
            else:
                if r < 0:
                    last = midpoint - 1
                else:
                    first = midpoint + 1
        # not found: ``first`` is the insertion point that keeps order
        return (first, False)

    def __contains__(self, key):
        return self.bsearch(key) != -1

    def _getflags(self, data, needle, pos):
        # NOTE(review): ``needle`` is unused here, and the final slice
        # reads ``self.data`` rather than the ``data`` argument. All
        # current callers pass data that *is* self.data (the pos == -1
        # case returns earlier), so behavior is unaffected — confirm
        # before reusing this with any other buffer.
        start = pos + 41
        end = data.find(b"\n", start)
        if end == -1:
            end = len(data) - 1
        if start == end:
            return b''
        return self.data[start:end]

    def __getitem__(self, key):
        """Return ``(node, flags)`` for ``key``; raise KeyError if absent."""
        if not isinstance(key, bytes):
            raise TypeError(b"getitem: manifest keys must be a bytes.")
        needle = self.bsearch(key)
        if needle == -1:
            raise KeyError
        data, pos = self._get(needle)
        if pos == -1:
            # extradata entry: already a (name, node, flags) tuple
            return (data[1], data[2])
        zeropos = data.find(b'\x00', pos)
        assert 0 <= needle <= len(self.positions)
        assert len(self.extrainfo) == len(self.positions)
        hashval = unhexlify(data, self.extrainfo[needle], zeropos + 1, 40)
        flags = self._getflags(data, needle, zeropos)
        return (hashval, flags)

    def __delitem__(self, key):
        needle, found = self.bsearch2(key)
        if not found:
            raise KeyError
        cur = self.positions[needle]
        self.positions = self.positions[:needle] + self.positions[needle + 1 :]
        self.extrainfo = self.extrainfo[:needle] + self.extrainfo[needle + 1 :]
        if cur >= 0:
            # This does NOT unsort the list as far as the search functions are
            # concerned, as they only examine lines mapped by self.positions.
            self.data = self.data[:cur] + b'\x00' + self.data[cur + 1 :]
            self.hasremovals = True

    def __setitem__(self, key, value):
        """Set ``key`` to ``value``, a ``(node, flags)`` tuple."""
        if not isinstance(key, bytes):
            raise TypeError(b"setitem: manifest keys must be a byte string.")
        if not isinstance(value, tuple) or len(value) != 2:
            raise TypeError(
                b"Manifest values must be a tuple of (node, flags)."
            )
        hashval = value[0]
        if not isinstance(hashval, bytes) or not 20 <= len(hashval) <= 22:
            raise TypeError(b"node must be a 20-byte byte string")
        flags = value[1]
        if len(hashval) == 22:
            # drop the trailing byte of a 22-byte node before storing
            hashval = hashval[:-1]
        if not isinstance(flags, bytes) or len(flags) > 1:
            raise TypeError(b"flags must a 0 or 1 byte string, got %r", flags)
        needle, found = self.bsearch2(key)
        if found:
            # put the item
            pos = self.positions[needle]
            if pos < 0:
                self.extradata[-pos - 1] = (key, hashval, value[1])
            else:
                # just don't bother
                self.extradata.append((key, hashval, value[1]))
                self.positions[needle] = -len(self.extradata)
        else:
            # not found, put it in with extra positions
            self.extradata.append((key, hashval, value[1]))
            self.positions = (
                self.positions[:needle]
                + [-len(self.extradata)]
                + self.positions[needle:]
            )
            self.extrainfo = (
                self.extrainfo[:needle] + [0] + self.extrainfo[needle:]
            )

    def copy(self):
        # XXX call _compact like in C?
        return _lazymanifest(
            self.data,
            self.positions,
            self.extrainfo,
            self.extradata,
            self.hasremovals,
        )

    def _compact(self):
        # hopefully not called TOO often
        if len(self.extradata) == 0 and not self.hasremovals:
            return
        l = []
        i = 0
        offset = 0
        self.extrainfo = [0] * len(self.positions)
        while i < len(self.positions):
            if self.positions[i] >= 0:
                cur = self.positions[i]
                last_cut = cur

                # Collect all contiguous entries in the buffer at the current
                # offset, breaking out only for added/modified items held in
                # extradata, or a deleted line prior to the next position.
                while True:
                    self.positions[i] = offset
                    i += 1
                    if i == len(self.positions) or self.positions[i] < 0:
                        break

                    # A removed file has no positions[] entry, but does have an
                    # overwritten first byte. Break out and find the end of the
                    # current good entry/entries if there is a removed file
                    # before the next position.
                    if (
                        self.hasremovals
                        and self.data.find(b'\n\x00', cur, self.positions[i])
                        != -1
                    ):
                        break

                    offset += self.positions[i] - cur
                    cur = self.positions[i]
                end_cut = self.data.find(b'\n', cur)
                if end_cut != -1:
                    end_cut += 1
                offset += end_cut - cur
                l.append(self.data[last_cut:end_cut])
            else:
                while i < len(self.positions) and self.positions[i] < 0:
                    cur = self.positions[i]
                    t = self.extradata[-cur - 1]
                    l.append(self._pack(t))
                    self.positions[i] = offset
                    # NOTE(review): on Python 3, t[1][21] yields an int, so
                    # ord() here would raise; __setitem__ also trims nodes
                    # to 21 bytes, making index 21 look unreachable —
                    # confirm whether this branch can still fire.
                    if len(t[1]) > 20:
                        self.extrainfo[i] = ord(t[1][21])
                    offset += len(l[-1])
                    i += 1
        self.data = b''.join(l)
        self.hasremovals = False
        self.extradata = []

    def _pack(self, d):
        # serialize one (name, node, flags) triple back to line form;
        # nodes longer than 20 bytes are trimmed to 20 here
        return d[0] + b'\x00' + hex(d[1][:20]) + d[2] + b'\n'

    def text(self):
        """Return the manifest as bytes, compacting pending edits first."""
        self._compact()
        return self.data

    def diff(self, m2, clean=False):
        '''Finds changes between the current manifest and m2.'''
        # XXX think whether efficiency matters here
        diff = {}

        for fn, e1, flags in self.iterentries():
            if fn not in m2:
                diff[fn] = (e1, flags), (None, b'')
            else:
                e2 = m2[fn]
                if (e1, flags) != e2:
                    diff[fn] = (e1, flags), e2
                elif clean:
                    diff[fn] = None

        for fn, e2, flags in m2.iterentries():
            if fn not in self:
                diff[fn] = (None, b''), (e2, flags)

        return diff

    def iterentries(self):
        return lazymanifestiterentries(self)

    def iterkeys(self):
        return lazymanifestiter(self)

    def __iter__(self):
        return lazymanifestiter(self)

    def __len__(self):
        return len(self.positions)

    def filtercopy(self, filterfn):
        # XXX should be optimized
        c = _lazymanifest(b'')
        for f, n, fl in self.iterentries():
            if filterfn(f):
                c[f] = n, fl
        return c
433
433
434
434
# Prefer the C implementation of lazymanifest when the parsers module
# provides one; the pure-Python class above remains the fallback.
try:
    _lazymanifest = parsers.lazymanifest
except AttributeError:
    pass
439
439
440
440
441 @interfaceutil.implementer(repository.imanifestdict)
441 @interfaceutil.implementer(repository.imanifestdict)
442 class manifestdict(object):
442 class manifestdict(object):
    def __init__(self, data=b''):
        # Backed by _lazymanifest (C implementation when available).
        self._lm = _lazymanifest(data)
445
445
    def __getitem__(self, key):
        """Return the nodeid for ``key``; the flags part is dropped."""
        return self._lm[key][0]
448
448
    def find(self, key):
        """Return the ``(node, flags)`` pair for ``key``; KeyError if absent."""
        return self._lm[key]
451
451
    def __len__(self):
        """Number of files in the manifest."""
        return len(self._lm)
454
454
    def __nonzero__(self):
        # nonzero is covered by the __len__ function, but implementing it here
        # makes it easier for extensions to override.
        return len(self._lm) != 0

    # Python 3 spelling of the truthiness hook
    __bool__ = __nonzero__
461
461
    def __setitem__(self, key, node):
        # Preserve whatever flags are already recorded for this path.
        self._lm[key] = node, self.flags(key)
464
464
465 def __contains__(self, key):
465 def __contains__(self, key):
466 if key is None:
466 if key is None:
467 return False
467 return False
468 return key in self._lm
468 return key in self._lm
469
469
    def __delitem__(self, key):
        """Remove ``key`` from the manifest; KeyError if absent."""
        del self._lm[key]
472
472
    def __iter__(self):
        """Iterate over tracked file names (filename-sorted)."""
        return self._lm.__iter__()
475
475
    def iterkeys(self):
        """Iterator over tracked file names (filename-sorted)."""
        return self._lm.iterkeys()
478
478
    def keys(self):
        """Return all tracked file names as a list."""
        return list(self.iterkeys())
481
481
482 def filesnotin(self, m2, match=None):
482 def filesnotin(self, m2, match=None):
483 '''Set of files in this manifest that are not in the other'''
483 '''Set of files in this manifest that are not in the other'''
484 if match:
484 if match:
485 m1 = self.matches(match)
485 m1 = self.matches(match)
486 m2 = m2.matches(match)
486 m2 = m2.matches(match)
487 return m1.filesnotin(m2)
487 return m1.filesnotin(m2)
488 diff = self.diff(m2)
488 diff = self.diff(m2)
489 files = set(
489 files = set(
490 filepath
490 filepath
491 for filepath, hashflags in pycompat.iteritems(diff)
491 for filepath, hashflags in pycompat.iteritems(diff)
492 if hashflags[1][0] is None
492 if hashflags[1][0] is None
493 )
493 )
494 return files
494 return files
495
495
    @propertycache
    def _dirs(self):
        # Computed once per instance and cached by @propertycache.
        return pathutil.dirs(self)
499
499
    def dirs(self):
        """Return the cached ``pathutil.dirs`` object for this manifest."""
        return self._dirs
502
502
    def hasdir(self, dir):
        """True if ``dir`` is a directory of some tracked file."""
        return dir in self._dirs
505
505
    def _filesfastpath(self, match):
        '''Checks whether we can correctly and quickly iterate over matcher
        files instead of over manifest files.'''
        files = match.files()
        # Only worthwhile for small file lists; prefix matchers additionally
        # require every listed file to actually be present in the manifest.
        return len(files) < 100 and (
            match.isexact()
            or (match.prefix() and all(fn in self for fn in files))
        )
514
514
    def walk(self, match):
        '''Generates matching file names.

        Equivalent to manifest.matches(match).iterkeys(), but without creating
        an entirely new manifest.

        It also reports nonexistent files by marking them bad with match.bad().
        '''
        if match.always():
            # no filtering needed: yield every tracked file
            for f in iter(self):
                yield f
            return

        fset = set(match.files())

        # avoid the entire walk if we're only looking for specific files
        if self._filesfastpath(match):
            for fn in sorted(fset):
                yield fn
            return

        for fn in self:
            if fn in fset:
                # specified pattern is the exact name
                fset.remove(fn)
            if match(fn):
                yield fn

        # for dirstate.walk, files=[''] means "walk the whole tree".
        # follow that here, too
        fset.discard(b'')

        # anything left in fset was requested but never seen; report it
        # as bad unless it is a directory of tracked files
        for fn in sorted(fset):
            if not self.hasdir(fn):
                match.bad(fn, None)
550
550
    def matches(self, match):
        '''generate a new manifest filtered by the match argument'''
        if match.always():
            return self.copy()

        if self._filesfastpath(match):
            # small explicit file list: copy just those entries over
            m = manifestdict()
            lm = self._lm
            for fn in match.files():
                if fn in lm:
                    m._lm[fn] = lm[fn]
            return m

        # general case: filter every entry through the matcher
        m = manifestdict()
        m._lm = self._lm.filtercopy(match)
        return m
567
567
    def diff(self, m2, match=None, clean=False):
        '''Finds changes between the current manifest and m2.

        Args:
            m2: the manifest to which this manifest should be compared.
            clean: if true, include files unchanged between these manifests
                   with a None value in the returned dictionary.

        The result is returned as a dict with filename as key and
        values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
        nodeid in the current/other manifest and fl1/fl2 is the flag
        in the current/other manifest. Where the file does not exist,
        the nodeid will be None and the flags will be the empty
        string.
        '''
        if match:
            # narrow both sides to the matched files, then diff those
            m1 = self.matches(match)
            m2 = m2.matches(match)
            return m1.diff(m2, clean=clean)
        return self._lm.diff(m2._lm, clean)
588
588
    def setflag(self, key, flag):
        """Set the flags for ``key``, keeping its current node."""
        self._lm[key] = self[key], flag
591
591
592 def get(self, key, default=None):
592 def get(self, key, default=None):
593 try:
593 try:
594 return self._lm[key][0]
594 return self._lm[key][0]
595 except KeyError:
595 except KeyError:
596 return default
596 return default
597
597
598 def flags(self, key):
598 def flags(self, key):
599 try:
599 try:
600 return self._lm[key][1]
600 return self._lm[key][1]
601 except KeyError:
601 except KeyError:
602 return b''
602 return b''
603
603
604 def copy(self):
604 def copy(self):
605 c = manifestdict()
605 c = manifestdict()
606 c._lm = self._lm.copy()
606 c._lm = self._lm.copy()
607 return c
607 return c
608
608
609 def items(self):
609 def items(self):
610 return (x[:2] for x in self._lm.iterentries())
610 return (x[:2] for x in self._lm.iterentries())
611
611
612 def iteritems(self):
612 def iteritems(self):
613 return (x[:2] for x in self._lm.iterentries())
613 return (x[:2] for x in self._lm.iterentries())
614
614
615 def iterentries(self):
615 def iterentries(self):
616 return self._lm.iterentries()
616 return self._lm.iterentries()
617
617
618 def text(self):
618 def text(self):
619 # most likely uses native version
619 # most likely uses native version
620 return self._lm.text()
620 return self._lm.text()
621
621
    def fastdelta(self, base, changes):
        """Given a base manifest text as a bytearray and a list of changes
        relative to that text, compute a delta that can be used by revlog.

        *changes* is an iterable of (filename, todelete) pairs.  Returns
        (arraytext, deltatext): the new full text and a binary delta against
        *base* suitable for addrevision.
        """
        delta = []
        dstart = None
        dend = None
        dline = [b""]
        start = 0
        # zero copy representation of base as a buffer
        addbuf = util.buffer(base)

        changes = list(changes)
        if len(changes) < FASTDELTA_TEXTDIFF_THRESHOLD:
            # start with a readonly loop that finds the offset of
            # each line and creates the deltas
            for f, todelete in changes:
                # bs will either be the index of the item or the insert point
                start, end = _msearch(addbuf, f, start)
                if not todelete:
                    h, fl = self._lm[f]
                    l = b"%s\0%s%s\n" % (f, hex(h), fl)
                else:
                    if start == end:
                        # item we want to delete was not found, error out
                        raise AssertionError(
                            _(b"failed to remove %s from manifest") % f
                        )
                    l = b""
                # merge adjacent/overlapping edits into one delta hunk
                if dstart is not None and dstart <= start and dend >= start:
                    if dend < end:
                        dend = end
                    if l:
                        dline.append(l)
                else:
                    if dstart is not None:
                        delta.append([dstart, dend, b"".join(dline)])
                    dstart = start
                    dend = end
                    dline = [l]

            if dstart is not None:
                delta.append([dstart, dend, b"".join(dline)])
            # apply the delta to the base, and get a delta for addrevision
            deltatext, arraytext = _addlistdelta(base, delta)
        else:
            # For large changes, it's much cheaper to just build the text and
            # diff it.
            arraytext = bytearray(self.text())
            deltatext = mdiff.textdiff(
                util.buffer(base), util.buffer(arraytext)
            )

        return arraytext, deltatext
676
676
677
677
678 def _msearch(m, s, lo=0, hi=None):
678 def _msearch(m, s, lo=0, hi=None):
679 '''return a tuple (start, end) that says where to find s within m.
679 '''return a tuple (start, end) that says where to find s within m.
680
680
681 If the string is found m[start:end] are the line containing
681 If the string is found m[start:end] are the line containing
682 that string. If start == end the string was not found and
682 that string. If start == end the string was not found and
683 they indicate the proper sorted insertion point.
683 they indicate the proper sorted insertion point.
684
684
685 m should be a buffer, a memoryview or a byte string.
685 m should be a buffer, a memoryview or a byte string.
686 s is a byte string'''
686 s is a byte string'''
687
687
688 def advance(i, c):
688 def advance(i, c):
689 while i < lenm and m[i : i + 1] != c:
689 while i < lenm and m[i : i + 1] != c:
690 i += 1
690 i += 1
691 return i
691 return i
692
692
693 if not s:
693 if not s:
694 return (lo, lo)
694 return (lo, lo)
695 lenm = len(m)
695 lenm = len(m)
696 if not hi:
696 if not hi:
697 hi = lenm
697 hi = lenm
698 while lo < hi:
698 while lo < hi:
699 mid = (lo + hi) // 2
699 mid = (lo + hi) // 2
700 start = mid
700 start = mid
701 while start > 0 and m[start - 1 : start] != b'\n':
701 while start > 0 and m[start - 1 : start] != b'\n':
702 start -= 1
702 start -= 1
703 end = advance(start, b'\0')
703 end = advance(start, b'\0')
704 if bytes(m[start:end]) < s:
704 if bytes(m[start:end]) < s:
705 # we know that after the null there are 40 bytes of sha1
705 # we know that after the null there are 40 bytes of sha1
706 # this translates to the bisect lo = mid + 1
706 # this translates to the bisect lo = mid + 1
707 lo = advance(end + 40, b'\n') + 1
707 lo = advance(end + 40, b'\n') + 1
708 else:
708 else:
709 # this translates to the bisect hi = mid
709 # this translates to the bisect hi = mid
710 hi = start
710 hi = start
711 end = advance(lo, b'\0')
711 end = advance(lo, b'\0')
712 found = m[lo:end]
712 found = m[lo:end]
713 if s == found:
713 if s == found:
714 # we know that after the null there are 40 bytes of sha1
714 # we know that after the null there are 40 bytes of sha1
715 end = advance(end + 40, b'\n')
715 end = advance(end + 40, b'\n')
716 return (lo, end + 1)
716 return (lo, end + 1)
717 else:
717 else:
718 return (lo, lo)
718 return (lo, lo)
719
719
720
720
721 def _checkforbidden(l):
721 def _checkforbidden(l):
722 """Check filenames for illegal characters."""
722 """Check filenames for illegal characters."""
723 for f in l:
723 for f in l:
724 if b'\n' in f or b'\r' in f:
724 if b'\n' in f or b'\r' in f:
725 raise error.StorageError(
725 raise error.StorageError(
726 _(b"'\\n' and '\\r' disallowed in filenames: %r")
726 _(b"'\\n' and '\\r' disallowed in filenames: %r")
727 % pycompat.bytestr(f)
727 % pycompat.bytestr(f)
728 )
728 )
729
729
730
730
731 # apply the changes collected during the bisect loop to our addlist
731 # apply the changes collected during the bisect loop to our addlist
732 # return a delta suitable for addrevision
732 # return a delta suitable for addrevision
733 def _addlistdelta(addlist, x):
733 def _addlistdelta(addlist, x):
734 # for large addlist arrays, building a new array is cheaper
734 # for large addlist arrays, building a new array is cheaper
735 # than repeatedly modifying the existing one
735 # than repeatedly modifying the existing one
736 currentposition = 0
736 currentposition = 0
737 newaddlist = bytearray()
737 newaddlist = bytearray()
738
738
739 for start, end, content in x:
739 for start, end, content in x:
740 newaddlist += addlist[currentposition:start]
740 newaddlist += addlist[currentposition:start]
741 if content:
741 if content:
742 newaddlist += bytearray(content)
742 newaddlist += bytearray(content)
743
743
744 currentposition = end
744 currentposition = end
745
745
746 newaddlist += addlist[currentposition:]
746 newaddlist += addlist[currentposition:]
747
747
748 deltatext = b"".join(
748 deltatext = b"".join(
749 struct.pack(b">lll", start, end, len(content)) + content
749 struct.pack(b">lll", start, end, len(content)) + content
750 for start, end, content in x
750 for start, end, content in x
751 )
751 )
752 return deltatext, newaddlist
752 return deltatext, newaddlist
753
753
754
754
755 def _splittopdir(f):
755 def _splittopdir(f):
756 if b'/' in f:
756 if b'/' in f:
757 dir, subpath = f.split(b'/', 1)
757 dir, subpath = f.split(b'/', 1)
758 return dir + b'/', subpath
758 return dir + b'/', subpath
759 else:
759 else:
760 return b'', f
760 return b'', f
761
761
762
762
763 _noop = lambda s: None
763 _noop = lambda s: None
764
764
765
765
766 class treemanifest(object):
766 class treemanifest(object):
    def __init__(self, dir=b'', text=b''):
        """Create a tree manifest for directory *dir*, optionally parsing a
        flat manifest *text* into it."""
        # directory this subtree covers, with trailing '/'; b'' for the root
        self._dir = dir
        # storage node; nullid until read from / written to a revlog
        self._node = nullid
        # deferred load/copy callbacks; _noop (by identity) means "none"
        self._loadfunc = _noop
        self._copyfunc = _noop
        self._dirty = False
        # loaded submanifests: {b'subdir/': treemanifest}
        self._dirs = {}
        # not-yet-loaded submanifests:
        # {b'subdir/': (path, node, readsubtree, docopy)}
        self._lazydirs = {}
        # Using _lazymanifest here is a little slower than plain old dicts
        self._files = {}
        self._flags = {}
        if text:

            def readsubtree(subdir, subm):
                # a flat manifest text can never reference subtrees
                raise AssertionError(
                    b'treemanifest constructor only accepts flat manifests'
                )

            self.parse(text, readsubtree)
            self._dirty = True  # Mark flat manifest dirty after parsing
787
787
788 def _subpath(self, path):
788 def _subpath(self, path):
789 return self._dir + path
789 return self._dir + path
790
790
791 def _loadalllazy(self):
791 def _loadalllazy(self):
792 selfdirs = self._dirs
792 selfdirs = self._dirs
793 for d, (path, node, readsubtree, docopy) in pycompat.iteritems(
793 for d, (path, node, readsubtree, docopy) in pycompat.iteritems(
794 self._lazydirs
794 self._lazydirs
795 ):
795 ):
796 if docopy:
796 if docopy:
797 selfdirs[d] = readsubtree(path, node).copy()
797 selfdirs[d] = readsubtree(path, node).copy()
798 else:
798 else:
799 selfdirs[d] = readsubtree(path, node)
799 selfdirs[d] = readsubtree(path, node)
800 self._lazydirs = {}
800 self._lazydirs = {}
801
801
802 def _loadlazy(self, d):
802 def _loadlazy(self, d):
803 v = self._lazydirs.get(d)
803 v = self._lazydirs.get(d)
804 if v:
804 if v:
805 path, node, readsubtree, docopy = v
805 path, node, readsubtree, docopy = v
806 if docopy:
806 if docopy:
807 self._dirs[d] = readsubtree(path, node).copy()
807 self._dirs[d] = readsubtree(path, node).copy()
808 else:
808 else:
809 self._dirs[d] = readsubtree(path, node)
809 self._dirs[d] = readsubtree(path, node)
810 del self._lazydirs[d]
810 del self._lazydirs[d]
811
811
812 def _loadchildrensetlazy(self, visit):
812 def _loadchildrensetlazy(self, visit):
813 if not visit:
813 if not visit:
814 return None
814 return None
815 if visit == b'all' or visit == b'this':
815 if visit == b'all' or visit == b'this':
816 self._loadalllazy()
816 self._loadalllazy()
817 return None
817 return None
818
818
819 loadlazy = self._loadlazy
819 loadlazy = self._loadlazy
820 for k in visit:
820 for k in visit:
821 loadlazy(k + b'/')
821 loadlazy(k + b'/')
822 return visit
822 return visit
823
823
    def _loaddifflazy(self, t1, t2):
        """load items in t1 and t2 if they're needed for diffing.

        The criteria currently is:
        - if it's not present in _lazydirs in either t1 or t2, load it in the
          other (it may already be loaded or it may not exist, doesn't matter)
        - if it's present in _lazydirs in both, compare the nodeid; if it
          differs, load it in both
        """
        toloadlazy = []
        for d, v1 in pycompat.iteritems(t1._lazydirs):
            v2 = t2._lazydirs.get(d)
            # v1/v2 are (path, node, readsubtree, docopy); [1] is the node
            if not v2 or v2[1] != v1[1]:
                toloadlazy.append(d)
        for d, v1 in pycompat.iteritems(t2._lazydirs):
            if d not in t1._lazydirs:
                toloadlazy.append(d)

        for d in toloadlazy:
            # _loadlazy is a no-op for entries not pending in that tree
            t1._loadlazy(d)
            t2._loadlazy(d)
845
845
846 def __len__(self):
846 def __len__(self):
847 self._load()
847 self._load()
848 size = len(self._files)
848 size = len(self._files)
849 self._loadalllazy()
849 self._loadalllazy()
850 for m in self._dirs.values():
850 for m in self._dirs.values():
851 size += m.__len__()
851 size += m.__len__()
852 return size
852 return size
853
853
854 def __nonzero__(self):
854 def __nonzero__(self):
855 # Faster than "__len() != 0" since it avoids loading sub-manifests
855 # Faster than "__len() != 0" since it avoids loading sub-manifests
856 return not self._isempty()
856 return not self._isempty()
857
857
858 __bool__ = __nonzero__
858 __bool__ = __nonzero__
859
859
860 def _isempty(self):
860 def _isempty(self):
861 self._load() # for consistency; already loaded by all callers
861 self._load() # for consistency; already loaded by all callers
862 # See if we can skip loading everything.
862 # See if we can skip loading everything.
863 if self._files or (
863 if self._files or (
864 self._dirs and any(not m._isempty() for m in self._dirs.values())
864 self._dirs and any(not m._isempty() for m in self._dirs.values())
865 ):
865 ):
866 return False
866 return False
867 self._loadalllazy()
867 self._loadalllazy()
868 return not self._dirs or all(m._isempty() for m in self._dirs.values())
868 return not self._dirs or all(m._isempty() for m in self._dirs.values())
869
869
870 def __repr__(self):
870 def __repr__(self):
871 return (
871 return (
872 b'<treemanifest dir=%s, node=%s, loaded=%s, dirty=%s at 0x%x>'
872 b'<treemanifest dir=%s, node=%s, loaded=%s, dirty=%s at 0x%x>'
873 % (
873 % (
874 self._dir,
874 self._dir,
875 hex(self._node),
875 hex(self._node),
876 bool(self._loadfunc is _noop),
876 bool(self._loadfunc is _noop),
877 self._dirty,
877 self._dirty,
878 id(self),
878 id(self),
879 )
879 )
880 )
880 )
881
881
882 def dir(self):
882 def dir(self):
883 '''The directory that this tree manifest represents, including a
883 '''The directory that this tree manifest represents, including a
884 trailing '/'. Empty string for the repo root directory.'''
884 trailing '/'. Empty string for the repo root directory.'''
885 return self._dir
885 return self._dir
886
886
887 def node(self):
887 def node(self):
888 '''This node of this instance. nullid for unsaved instances. Should
888 '''This node of this instance. nullid for unsaved instances. Should
889 be updated when the instance is read or written from a revlog.
889 be updated when the instance is read or written from a revlog.
890 '''
890 '''
891 assert not self._dirty
891 assert not self._dirty
892 return self._node
892 return self._node
893
893
894 def setnode(self, node):
894 def setnode(self, node):
895 self._node = node
895 self._node = node
896 self._dirty = False
896 self._dirty = False
897
897
898 def iterentries(self):
898 def iterentries(self):
899 self._load()
899 self._load()
900 self._loadalllazy()
900 self._loadalllazy()
901 for p, n in sorted(
901 for p, n in sorted(
902 itertools.chain(self._dirs.items(), self._files.items())
902 itertools.chain(self._dirs.items(), self._files.items())
903 ):
903 ):
904 if p in self._files:
904 if p in self._files:
905 yield self._subpath(p), n, self._flags.get(p, b'')
905 yield self._subpath(p), n, self._flags.get(p, b'')
906 else:
906 else:
907 for x in n.iterentries():
907 for x in n.iterentries():
908 yield x
908 yield x
909
909
910 def items(self):
910 def items(self):
911 self._load()
911 self._load()
912 self._loadalllazy()
912 self._loadalllazy()
913 for p, n in sorted(
913 for p, n in sorted(
914 itertools.chain(self._dirs.items(), self._files.items())
914 itertools.chain(self._dirs.items(), self._files.items())
915 ):
915 ):
916 if p in self._files:
916 if p in self._files:
917 yield self._subpath(p), n
917 yield self._subpath(p), n
918 else:
918 else:
919 for f, sn in pycompat.iteritems(n):
919 for f, sn in pycompat.iteritems(n):
920 yield f, sn
920 yield f, sn
921
921
922 iteritems = items
922 iteritems = items
923
923
924 def iterkeys(self):
924 def iterkeys(self):
925 self._load()
925 self._load()
926 self._loadalllazy()
926 self._loadalllazy()
927 for p in sorted(itertools.chain(self._dirs, self._files)):
927 for p in sorted(itertools.chain(self._dirs, self._files)):
928 if p in self._files:
928 if p in self._files:
929 yield self._subpath(p)
929 yield self._subpath(p)
930 else:
930 else:
931 for f in self._dirs[p]:
931 for f in self._dirs[p]:
932 yield f
932 yield f
933
933
934 def keys(self):
934 def keys(self):
935 return list(self.iterkeys())
935 return list(self.iterkeys())
936
936
937 def __iter__(self):
937 def __iter__(self):
938 return self.iterkeys()
938 return self.iterkeys()
939
939
940 def __contains__(self, f):
940 def __contains__(self, f):
941 if f is None:
941 if f is None:
942 return False
942 return False
943 self._load()
943 self._load()
944 dir, subpath = _splittopdir(f)
944 dir, subpath = _splittopdir(f)
945 if dir:
945 if dir:
946 self._loadlazy(dir)
946 self._loadlazy(dir)
947
947
948 if dir not in self._dirs:
948 if dir not in self._dirs:
949 return False
949 return False
950
950
951 return self._dirs[dir].__contains__(subpath)
951 return self._dirs[dir].__contains__(subpath)
952 else:
952 else:
953 return f in self._files
953 return f in self._files
954
954
955 def get(self, f, default=None):
955 def get(self, f, default=None):
956 self._load()
956 self._load()
957 dir, subpath = _splittopdir(f)
957 dir, subpath = _splittopdir(f)
958 if dir:
958 if dir:
959 self._loadlazy(dir)
959 self._loadlazy(dir)
960
960
961 if dir not in self._dirs:
961 if dir not in self._dirs:
962 return default
962 return default
963 return self._dirs[dir].get(subpath, default)
963 return self._dirs[dir].get(subpath, default)
964 else:
964 else:
965 return self._files.get(f, default)
965 return self._files.get(f, default)
966
966
967 def __getitem__(self, f):
967 def __getitem__(self, f):
968 self._load()
968 self._load()
969 dir, subpath = _splittopdir(f)
969 dir, subpath = _splittopdir(f)
970 if dir:
970 if dir:
971 self._loadlazy(dir)
971 self._loadlazy(dir)
972
972
973 return self._dirs[dir].__getitem__(subpath)
973 return self._dirs[dir].__getitem__(subpath)
974 else:
974 else:
975 return self._files[f]
975 return self._files[f]
976
976
977 def flags(self, f):
977 def flags(self, f):
978 self._load()
978 self._load()
979 dir, subpath = _splittopdir(f)
979 dir, subpath = _splittopdir(f)
980 if dir:
980 if dir:
981 self._loadlazy(dir)
981 self._loadlazy(dir)
982
982
983 if dir not in self._dirs:
983 if dir not in self._dirs:
984 return b''
984 return b''
985 return self._dirs[dir].flags(subpath)
985 return self._dirs[dir].flags(subpath)
986 else:
986 else:
987 if f in self._lazydirs or f in self._dirs:
987 if f in self._lazydirs or f in self._dirs:
988 return b''
988 return b''
989 return self._flags.get(f, b'')
989 return self._flags.get(f, b'')
990
990
991 def find(self, f):
991 def find(self, f):
992 self._load()
992 self._load()
993 dir, subpath = _splittopdir(f)
993 dir, subpath = _splittopdir(f)
994 if dir:
994 if dir:
995 self._loadlazy(dir)
995 self._loadlazy(dir)
996
996
997 return self._dirs[dir].find(subpath)
997 return self._dirs[dir].find(subpath)
998 else:
998 else:
999 return self._files[f], self._flags.get(f, b'')
999 return self._files[f], self._flags.get(f, b'')
1000
1000
1001 def __delitem__(self, f):
1001 def __delitem__(self, f):
1002 self._load()
1002 self._load()
1003 dir, subpath = _splittopdir(f)
1003 dir, subpath = _splittopdir(f)
1004 if dir:
1004 if dir:
1005 self._loadlazy(dir)
1005 self._loadlazy(dir)
1006
1006
1007 self._dirs[dir].__delitem__(subpath)
1007 self._dirs[dir].__delitem__(subpath)
1008 # If the directory is now empty, remove it
1008 # If the directory is now empty, remove it
1009 if self._dirs[dir]._isempty():
1009 if self._dirs[dir]._isempty():
1010 del self._dirs[dir]
1010 del self._dirs[dir]
1011 else:
1011 else:
1012 del self._files[f]
1012 del self._files[f]
1013 if f in self._flags:
1013 if f in self._flags:
1014 del self._flags[f]
1014 del self._flags[f]
1015 self._dirty = True
1015 self._dirty = True
1016
1016
1017 def __setitem__(self, f, n):
1017 def __setitem__(self, f, n):
1018 assert n is not None
1018 assert n is not None
1019 self._load()
1019 self._load()
1020 dir, subpath = _splittopdir(f)
1020 dir, subpath = _splittopdir(f)
1021 if dir:
1021 if dir:
1022 self._loadlazy(dir)
1022 self._loadlazy(dir)
1023 if dir not in self._dirs:
1023 if dir not in self._dirs:
1024 self._dirs[dir] = treemanifest(self._subpath(dir))
1024 self._dirs[dir] = treemanifest(self._subpath(dir))
1025 self._dirs[dir].__setitem__(subpath, n)
1025 self._dirs[dir].__setitem__(subpath, n)
1026 else:
1026 else:
1027 self._files[f] = n[:21] # to match manifestdict's behavior
1027 self._files[f] = n[:21] # to match manifestdict's behavior
1028 self._dirty = True
1028 self._dirty = True
1029
1029
    def _load(self):
        """Run any pending lazy-load or lazy-copy callback exactly once."""
        if self._loadfunc is not _noop:
            # swap _noop in *before* calling, so a reentrant _load is a no-op
            lf, self._loadfunc = self._loadfunc, _noop
            lf(self)
        elif self._copyfunc is not _noop:
            cf, self._copyfunc = self._copyfunc, _noop
            cf(self)
1037
1037
1038 def setflag(self, f, flags):
1038 def setflag(self, f, flags):
1039 """Set the flags (symlink, executable) for path f."""
1039 """Set the flags (symlink, executable) for path f."""
1040 self._load()
1040 self._load()
1041 dir, subpath = _splittopdir(f)
1041 dir, subpath = _splittopdir(f)
1042 if dir:
1042 if dir:
1043 self._loadlazy(dir)
1043 self._loadlazy(dir)
1044 if dir not in self._dirs:
1044 if dir not in self._dirs:
1045 self._dirs[dir] = treemanifest(self._subpath(dir))
1045 self._dirs[dir] = treemanifest(self._subpath(dir))
1046 self._dirs[dir].setflag(subpath, flags)
1046 self._dirs[dir].setflag(subpath, flags)
1047 else:
1047 else:
1048 self._flags[f] = flags
1048 self._flags[f] = flags
1049 self._dirty = True
1049 self._dirty = True
1050
1050
    def copy(self):
        """Return a copy of this tree, deferring the expensive work when
        possible.

        If this instance still has a pending load (_loadfunc), the recursive
        copy is registered as the new instance's _copyfunc and only runs
        if/when the copy is actually used.
        """
        copy = treemanifest(self._dir)
        copy._node = self._node
        copy._dirty = self._dirty
        if self._copyfunc is _noop:

            def _copyfunc(s):
                self._load()
                # re-flag every lazy entry as copy-on-load (last item True)
                # so the copy never shares a loaded subtree with us
                s._lazydirs = {
                    d: (p, n, r, True)
                    for d, (p, n, r, c) in pycompat.iteritems(self._lazydirs)
                }
                sdirs = s._dirs
                for d, v in pycompat.iteritems(self._dirs):
                    sdirs[d] = v.copy()
                s._files = dict.copy(self._files)
                s._flags = dict.copy(self._flags)

            if self._loadfunc is _noop:
                # already fully loaded: copy eagerly
                _copyfunc(copy)
            else:
                # defer until the copy is first loaded
                copy._copyfunc = _copyfunc
        else:
            # we are ourselves an unmaterialized copy; chain the callback
            copy._copyfunc = self._copyfunc
        return copy
1076
1076
    def filesnotin(self, m2, match=None):
        '''Set of files in this manifest that are not in the other'''
        if match and not match.always():
            # restrict both sides to the match before comparing
            m1 = self.matches(match)
            m2 = m2.matches(match)
            return m1.filesnotin(m2)

        files = set()

        def _filesnotin(t1, t2):
            # identical, clean subtrees cannot differ; prune the recursion
            if t1._node == t2._node and not t1._dirty and not t2._dirty:
                return
            t1._load()
            t2._load()
            self._loaddifflazy(t1, t2)
            for d, m1 in pycompat.iteritems(t1._dirs):
                if d in t2._dirs:
                    m2 = t2._dirs[d]
                    _filesnotin(m1, m2)
                else:
                    # whole subtree is missing from t2
                    files.update(m1.iterkeys())

            for fn in t1._files:
                if fn not in t2._files:
                    files.add(t1._subpath(fn))

        _filesnotin(self, m2)
        return files
1105
1105
1106 @propertycache
1106 @propertycache
1107 def _alldirs(self):
1107 def _alldirs(self):
1108 return pathutil.dirs(self)
1108 return pathutil.dirs(self)
1109
1109
1110 def dirs(self):
1110 def dirs(self):
1111 return self._alldirs
1111 return self._alldirs
1112
1112
1113 def hasdir(self, dir):
1113 def hasdir(self, dir):
1114 self._load()
1114 self._load()
1115 topdir, subdir = _splittopdir(dir)
1115 topdir, subdir = _splittopdir(dir)
1116 if topdir:
1116 if topdir:
1117 self._loadlazy(topdir)
1117 self._loadlazy(topdir)
1118 if topdir in self._dirs:
1118 if topdir in self._dirs:
1119 return self._dirs[topdir].hasdir(subdir)
1119 return self._dirs[topdir].hasdir(subdir)
1120 return False
1120 return False
1121 dirslash = dir + b'/'
1121 dirslash = dir + b'/'
1122 return dirslash in self._dirs or dirslash in self._lazydirs
1122 return dirslash in self._dirs or dirslash in self._lazydirs
1123
1123
    def walk(self, match):
        '''Generates matching file names.

        Equivalent to manifest.matches(match).iterkeys(), but without creating
        an entirely new manifest.

        It also reports nonexistent files by marking them bad with match.bad().
        '''
        if match.always():
            for f in iter(self):
                yield f
            return

        fset = set(match.files())

        for fn in self._walk(match):
            if fn in fset:
                # specified pattern is the exact name
                fset.remove(fn)
            yield fn

        # for dirstate.walk, files=[''] means "walk the whole tree".
        # follow that here, too
        fset.discard(b'')

        # anything left in fset was requested but never yielded; report it
        # as bad unless it names an existing directory
        for fn in sorted(fset):
            if not self.hasdir(fn):
                match.bad(fn, None)
1152
1152
    def _walk(self, match):
        '''Recursively generates matching file names for walk().'''
        visit = match.visitchildrenset(self._dir[:-1])
        if not visit:
            return

        # yield this dir's files and walk its submanifests
        self._load()
        visit = self._loadchildrensetlazy(visit)
        for p in sorted(list(self._dirs) + list(self._files)):
            if p in self._files:
                fullp = self._subpath(p)
                if match(fullp):
                    yield fullp
            else:
                # p ends with '/' but visit holds bare directory names
                if not visit or p[:-1] in visit:
                    for f in self._dirs[p]._walk(match):
                        yield f
1171
1171
1172 def matches(self, match):
1172 def matches(self, match):
1173 '''generate a new manifest filtered by the match argument'''
1173 '''generate a new manifest filtered by the match argument'''
1174 if match.always():
1174 if match.always():
1175 return self.copy()
1175 return self.copy()
1176
1176
1177 return self._matches(match)
1177 return self._matches(match)
1178
1178
    def _matches(self, match):
        '''recursively generate a new manifest filtered by the match argument.
        '''
        visit = match.visitchildrenset(self._dir[:-1])
        if visit == b'all':
            return self.copy()
        ret = treemanifest(self._dir)
        if not visit:
            return ret

        self._load()
        for fn in self._files:
            # While visitchildrenset *usually* lists only subdirs, this is
            # actually up to the matcher and may have some files in the set().
            # If visit == 'this', we should obviously look at the files in this
            # directory; if visit is a set, and fn is in it, we should inspect
            # fn (but no need to inspect things not in the set).
            if visit != b'this' and fn not in visit:
                continue
            fullp = self._subpath(fn)
            # visitchildrenset isn't perfect, we still need to call the regular
            # matcher code to further filter results.
            if not match(fullp):
                continue
            ret._files[fn] = self._files[fn]
            if fn in self._flags:
                ret._flags[fn] = self._flags[fn]

        visit = self._loadchildrensetlazy(visit)
        for dir, subm in pycompat.iteritems(self._dirs):
            # dir ends with '/'; visit holds bare directory names
            if visit and dir[:-1] not in visit:
                continue
            m = subm._matches(match)
            if not m._isempty():
                ret._dirs[dir] = m

        if not ret._isempty():
            ret._dirty = True
        return ret
1219
1219
    def diff(self, m2, match=None, clean=False):
        '''Finds changes between the current manifest and m2.

        Args:
          m2: the manifest to which this manifest should be compared.
          clean: if true, include files unchanged between these manifests
                 with a None value in the returned dictionary.

        The result is returned as a dict with filename as key and
        values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
        nodeid in the current/other manifest and fl1/fl2 is the flag
        in the current/other manifest. Where the file does not exist,
        the nodeid will be None and the flags will be the empty
        string.

        The walk is iterative (explicit stack) rather than recursive,
        and prunes identical, non-dirty subtrees by node comparison.
        '''
        if match and not match.always():
            m1 = self.matches(match)
            m2 = m2.matches(match)
            return m1.diff(m2, clean=clean)
        result = {}
        emptytree = treemanifest()

        def _iterativediff(t1, t2, stack):
            """compares two tree manifests and append new tree-manifests which
            needs to be compared to stack"""
            # same stored node with no in-memory edits -> provably identical
            if t1._node == t2._node and not t1._dirty and not t2._dirty:
                return
            t1._load()
            t2._load()
            self._loaddifflazy(t1, t2)

            # queue every child dir of t1 paired with t2's counterpart
            # (or an empty tree when t2 lacks it) ...
            for d, m1 in pycompat.iteritems(t1._dirs):
                m2 = t2._dirs.get(d, emptytree)
                stack.append((m1, m2))

            # ... plus child dirs present only in t2
            for d, m2 in pycompat.iteritems(t2._dirs):
                if d not in t1._dirs:
                    stack.append((emptytree, m2))

            # compare file entries at this level
            for fn, n1 in pycompat.iteritems(t1._files):
                fl1 = t1._flags.get(fn, b'')
                n2 = t2._files.get(fn, None)
                fl2 = t2._flags.get(fn, b'')
                if n1 != n2 or fl1 != fl2:
                    result[t1._subpath(fn)] = ((n1, fl1), (n2, fl2))
                elif clean:
                    result[t1._subpath(fn)] = None

            # files present only in t2
            for fn, n2 in pycompat.iteritems(t2._files):
                if fn not in t1._files:
                    fl2 = t2._flags.get(fn, b'')
                    result[t2._subpath(fn)] = ((None, b''), (n2, fl2))

        stackls = []
        _iterativediff(self, m2, stackls)
        while stackls:
            t1, t2 = stackls.pop()
            # stackls is populated in the function call
            _iterativediff(t1, t2, stackls)
        return result
1280
1280
1281 def unmodifiedsince(self, m2):
1281 def unmodifiedsince(self, m2):
1282 return not self._dirty and not m2._dirty and self._node == m2._node
1282 return not self._dirty and not m2._dirty and self._node == m2._node
1283
1283
    def parse(self, text, readsubtree):
        """Parse serialized manifest ``text`` into this tree manifest.

        Directory entries (flag b't') become lazily-loaded subtrees resolved
        through ``readsubtree``; plain file entries go straight into the
        internal maps without marking the manifest dirty.
        """
        selflazy = self._lazydirs
        subpath = self._subpath
        for f, n, fl in _parse(text):
            if fl == b't':
                f = f + b'/'
                # False below means "doesn't need to be copied" and can use the
                # cached value from readsubtree directly.
                selflazy[f] = (subpath(f), n, readsubtree, False)
            elif b'/' in f:
                # This is a flat manifest, so use __setitem__ and setflag rather
                # than assigning directly to _files and _flags, so we can
                # assign a path in a subdirectory, and to mark dirty (compared
                # to nullid).
                self[f] = n
                if fl:
                    self.setflag(f, fl)
            else:
                # Assigning to _files and _flags avoids marking as dirty,
                # and should be a little faster.
                self._files[f] = n
                if fl:
                    self._flags[f] = fl
1307
1307
1308 def text(self):
1308 def text(self):
1309 """Get the full data of this manifest as a bytestring."""
1309 """Get the full data of this manifest as a bytestring."""
1310 self._load()
1310 self._load()
1311 return _text(self.iterentries())
1311 return _text(self.iterentries())
1312
1312
    def dirtext(self):
        """Get the full data of this directory as a bytestring. Make sure that
        any submanifests have been written first, so their nodeids are correct.
        """
        self._load()
        flags = self.flags
        # unloaded lazy subdirs already know their node (v[1]); no need to
        # load them just to serialize this level
        lazydirs = [
            (d[:-1], v[1], b't') for d, v in pycompat.iteritems(self._lazydirs)
        ]
        dirs = [(d[:-1], self._dirs[d]._node, b't') for d in self._dirs]
        files = [(f, self._files[f], flags(f)) for f in self._files]
        return _text(sorted(dirs + files + lazydirs))
1325
1325
1326 def read(self, gettext, readsubtree):
1326 def read(self, gettext, readsubtree):
1327 def _load_for_read(s):
1327 def _load_for_read(s):
1328 s.parse(gettext(), readsubtree)
1328 s.parse(gettext(), readsubtree)
1329 s._dirty = False
1329 s._dirty = False
1330
1330
1331 self._loadfunc = _load_for_read
1331 self._loadfunc = _load_for_read
1332
1332
    def writesubtrees(self, m1, m2, writesubtree, match):
        """Invoke ``writesubtree`` for each child directory to be written.

        ``m1``/``m2`` are the parent tree manifests; each child's parent
        nodes are looked up in them (swapping so a nullid parent never
        comes first). Directories the matcher refuses to visit are skipped.
        """
        self._load()  # for consistency; should never have any effect here
        m1._load()
        m2._load()
        emptytree = treemanifest()

        def getnode(m, d):
            # prefer the lazy entry's recorded node; otherwise the loaded
            # subdir's node (or the empty tree's node when absent)
            ld = m._lazydirs.get(d)
            if ld:
                return ld[1]
            return m._dirs.get(d, emptytree)._node

        # let's skip investigating things that `match` says we do not need.
        visit = match.visitchildrenset(self._dir[:-1])
        visit = self._loadchildrensetlazy(visit)
        if visit == b'this' or visit == b'all':
            visit = None
        for d, subm in pycompat.iteritems(self._dirs):
            if visit and d[:-1] not in visit:
                continue
            subp1 = getnode(m1, d)
            subp2 = getnode(m2, d)
            if subp1 == nullid:
                subp1, subp2 = subp2, subp1
            writesubtree(subm, subp1, subp2, match)
1358
1358
1359 def walksubtrees(self, matcher=None):
1359 def walksubtrees(self, matcher=None):
1360 """Returns an iterator of the subtrees of this manifest, including this
1360 """Returns an iterator of the subtrees of this manifest, including this
1361 manifest itself.
1361 manifest itself.
1362
1362
1363 If `matcher` is provided, it only returns subtrees that match.
1363 If `matcher` is provided, it only returns subtrees that match.
1364 """
1364 """
1365 if matcher and not matcher.visitdir(self._dir[:-1]):
1365 if matcher and not matcher.visitdir(self._dir[:-1]):
1366 return
1366 return
1367 if not matcher or matcher(self._dir[:-1]):
1367 if not matcher or matcher(self._dir[:-1]):
1368 yield self
1368 yield self
1369
1369
1370 self._load()
1370 self._load()
1371 # OPT: use visitchildrenset to avoid loading everything.
1371 # OPT: use visitchildrenset to avoid loading everything.
1372 self._loadalllazy()
1372 self._loadalllazy()
1373 for d, subm in pycompat.iteritems(self._dirs):
1373 for d, subm in pycompat.iteritems(self._dirs):
1374 for subtree in subm.walksubtrees(matcher=matcher):
1374 for subtree in subm.walksubtrees(matcher=matcher):
1375 yield subtree
1375 yield subtree
1376
1376
1377
1377
class manifestfulltextcache(util.lrucachedict):
    """File-backed LRU cache for the manifest cache

    File consists of entries, up to EOF:

    - 20 bytes node, 4 bytes length, <length> manifest data

    These are written in reverse cache order (oldest to newest).

    """

    # name of the on-disk cache file, opened through ``self._opener``
    _file = b'manifestfulltextcache'

    def __init__(self, max):
        super(manifestfulltextcache, self).__init__(max)
        # True when in-memory state diverges from the on-disk file
        self._dirty = False
        # True once a load from disk has been attempted
        self._read = False
        # vfs used to open the cache file; None disables persistence
        self._opener = None

    def read(self):
        """Populate the cache from disk, at most once, if an opener is set.

        A short read or bad length field simply ends the load: this is only
        a cache, so corruption is tolerated by stopping early.
        """
        if self._read or self._opener is None:
            return

        try:
            with self._opener(self._file) as fp:
                # bypass our own __setitem__ so loading does not mark the
                # cache dirty
                set = super(manifestfulltextcache, self).__setitem__
                # ignore trailing data, this is a cache, corruption is skipped
                while True:
                    node = fp.read(20)
                    if len(node) < 20:
                        break
                    try:
                        size = struct.unpack(b'>L', fp.read(4))[0]
                    except struct.error:
                        break
                    value = bytearray(fp.read(size))
                    if len(value) != size:
                        break
                    set(node, value)
        except IOError:
            # the file is allowed to be missing
            pass

        self._read = True
        self._dirty = False

    def write(self):
        """Persist the cache to disk when dirty and an opener is set.

        Entries are written oldest-first so that re-reading the file
        reconstructs the same LRU order.
        """
        if not self._dirty or self._opener is None:
            return
        # rotate backwards to the first used node
        with self._opener(
            self._file, b'w', atomictemp=True, checkambig=True
        ) as fp:
            node = self._head.prev
            while True:
                if node.key in self._cache:
                    fp.write(node.key)
                    fp.write(struct.pack(b'>L', len(node.value)))
                    fp.write(node.value)
                if node is self._head:
                    break
                node = node.prev

    # Every access below first faults in the on-disk contents so the
    # in-memory view is complete before the base class operates on it.

    def __len__(self):
        if not self._read:
            self.read()
        return super(manifestfulltextcache, self).__len__()

    def __contains__(self, k):
        if not self._read:
            self.read()
        return super(manifestfulltextcache, self).__contains__(k)

    def __iter__(self):
        if not self._read:
            self.read()
        return super(manifestfulltextcache, self).__iter__()

    def __getitem__(self, k):
        if not self._read:
            self.read()
        # the cache lru order can change on read
        setdirty = self._cache.get(k) is not self._head
        value = super(manifestfulltextcache, self).__getitem__(k)
        if setdirty:
            self._dirty = True
        return value

    def __setitem__(self, k, v):
        if not self._read:
            self.read()
        super(manifestfulltextcache, self).__setitem__(k, v)
        self._dirty = True

    def __delitem__(self, k):
        if not self._read:
            self.read()
        super(manifestfulltextcache, self).__delitem__(k)
        self._dirty = True

    def get(self, k, default=None):
        if not self._read:
            self.read()
        return super(manifestfulltextcache, self).get(k, default=default)

    def clear(self, clear_persisted_data=False):
        """Drop the in-memory cache; optionally wipe the on-disk file too."""
        super(manifestfulltextcache, self).clear()
        if clear_persisted_data:
            self._dirty = True
            self.write()
        self._read = False
1489
1489
1490
1490
# An upper bound on what we expect from compression
# (real-life value seems to be "3")
MAXCOMPRESSION = 3
1494
1494
1495
1495
1496 @interfaceutil.implementer(repository.imanifeststorage)
1496 @interfaceutil.implementer(repository.imanifeststorage)
1497 class manifestrevlog(object):
1497 class manifestrevlog(object):
1498 '''A revlog that stores manifest texts. This is responsible for caching the
1498 '''A revlog that stores manifest texts. This is responsible for caching the
1499 full-text manifest contents.
1499 full-text manifest contents.
1500 '''
1500 '''
1501
1501
    def __init__(
        self,
        opener,
        tree=b'',
        dirlogcache=None,
        indexfile=None,
        treemanifest=False,
    ):
        """Constructs a new manifest revlog

        `indexfile` - used by extensions to have two manifests at once, like
        when transitioning between flat manifests and treemanifests.

        `treemanifest` - used to indicate this is a tree manifest revlog. Opener
        options can also be used to make this a tree manifest revlog. The opener
        option takes precedence, so if it is set to True, we ignore whatever
        value is passed in to the constructor.
        """
        # During normal operations, we expect to deal with not more than four
        # revs at a time (such as during commit --amend). When rebasing large
        # stacks of commits, the number can go up, hence the config knob below.
        cachesize = 4
        optiontreemanifest = False
        opts = getattr(opener, 'options', None)
        if opts is not None:
            cachesize = opts.get(b'manifestcachesize', cachesize)
            optiontreemanifest = opts.get(b'treemanifest', False)

        # opener option wins over the constructor argument (see docstring)
        self._treeondisk = optiontreemanifest or treemanifest

        self._fulltextcache = manifestfulltextcache(cachesize)

        # a non-root tree only makes sense with on-disk tree manifests
        if tree:
            assert self._treeondisk, b'opts is %r' % opts

        if indexfile is None:
            indexfile = b'00manifest.i'
            if tree:
                indexfile = b"meta/" + tree + indexfile

        self.tree = tree

        # The dirlogcache is kept on the root manifest log
        if tree:
            self._dirlogcache = dirlogcache
        else:
            self._dirlogcache = {b'': self}

        self._revlog = revlog.revlog(
            opener,
            indexfile,
            # only root indexfile is cached
            checkambig=not bool(tree),
            mmaplargeindex=True,
            upperboundcomp=MAXCOMPRESSION,
        )

        # expose a few revlog attributes directly for callers' convenience
        self.index = self._revlog.index
        self.version = self._revlog.version
        self._generaldelta = self._revlog._generaldelta
1562
1562
1563 def _setupmanifestcachehooks(self, repo):
1563 def _setupmanifestcachehooks(self, repo):
1564 """Persist the manifestfulltextcache on lock release"""
1564 """Persist the manifestfulltextcache on lock release"""
1565 if not util.safehasattr(repo, b'_wlockref'):
1565 if not util.safehasattr(repo, b'_wlockref'):
1566 return
1566 return
1567
1567
1568 self._fulltextcache._opener = repo.wcachevfs
1568 self._fulltextcache._opener = repo.wcachevfs
1569 if repo._currentlock(repo._wlockref) is None:
1569 if repo._currentlock(repo._wlockref) is None:
1570 return
1570 return
1571
1571
1572 reporef = weakref.ref(repo)
1572 reporef = weakref.ref(repo)
1573 manifestrevlogref = weakref.ref(self)
1573 manifestrevlogref = weakref.ref(self)
1574
1574
1575 def persistmanifestcache(success):
1575 def persistmanifestcache(success):
1576 # Repo is in an unknown state, do not persist.
1576 # Repo is in an unknown state, do not persist.
1577 if not success:
1577 if not success:
1578 return
1578 return
1579
1579
1580 repo = reporef()
1580 repo = reporef()
1581 self = manifestrevlogref()
1581 self = manifestrevlogref()
1582 if repo is None or self is None:
1582 if repo is None or self is None:
1583 return
1583 return
1584 if repo.manifestlog.getstorage(b'') is not self:
1584 if repo.manifestlog.getstorage(b'') is not self:
1585 # there's a different manifest in play now, abort
1585 # there's a different manifest in play now, abort
1586 return
1586 return
1587 self._fulltextcache.write()
1587 self._fulltextcache.write()
1588
1588
1589 repo._afterlock(persistmanifestcache)
1589 repo._afterlock(persistmanifestcache)
1590
1590
    @property
    def fulltextcache(self):
        # read-only access to the manifestfulltextcache instance
        return self._fulltextcache
1594
1594
    def clearcaches(self, clear_persisted_data=False):
        """Drop cached revlog and fulltext data.

        With ``clear_persisted_data``, the on-disk fulltext cache file is
        wiped as well. The dirlog cache is reset to just this revlog.
        """
        self._revlog.clearcaches()
        self._fulltextcache.clear(clear_persisted_data=clear_persisted_data)
        self._dirlogcache = {self.tree: self}
1599
1599
    def dirlog(self, d):
        """Return the manifestrevlog for subdirectory ``d`` (b'' is self).

        Per-directory revlogs only exist for on-disk tree manifests; results
        are memoized in ``_dirlogcache``, which lives on the root log.
        """
        if d:
            assert self._treeondisk
        if d not in self._dirlogcache:
            # NOTE(review): ``self.opener`` is not assigned in __init__;
            # presumably provided by a property elsewhere in this class —
            # verify before relying on it.
            mfrevlog = manifestrevlog(
                self.opener, d, self._dirlogcache, treemanifest=self._treeondisk
            )
            self._dirlogcache[d] = mfrevlog
        return self._dirlogcache[d]
1609
1609
1610 def add(
1610 def add(
1611 self,
1611 self,
1612 m,
1612 m,
1613 transaction,
1613 transaction,
1614 link,
1614 link,
1615 p1,
1615 p1,
1616 p2,
1616 p2,
1617 added,
1617 added,
1618 removed,
1618 removed,
1619 readtree=None,
1619 readtree=None,
1620 match=None,
1620 match=None,
1621 ):
1621 ):
1622 if p1 in self.fulltextcache and util.safehasattr(m, b'fastdelta'):
1622 if p1 in self.fulltextcache and util.safehasattr(m, b'fastdelta'):
1623 # If our first parent is in the manifest cache, we can
1623 # If our first parent is in the manifest cache, we can
1624 # compute a delta here using properties we know about the
1624 # compute a delta here using properties we know about the
1625 # manifest up-front, which may save time later for the
1625 # manifest up-front, which may save time later for the
1626 # revlog layer.
1626 # revlog layer.
1627
1627
1628 _checkforbidden(added)
1628 _checkforbidden(added)
1629 # combine the changed lists into one sorted iterator
1629 # combine the changed lists into one sorted iterator
1630 work = heapq.merge(
1630 work = heapq.merge(
1631 [(x, False) for x in sorted(added)],
1631 [(x, False) for x in sorted(added)],
1632 [(x, True) for x in sorted(removed)],
1632 [(x, True) for x in sorted(removed)],
1633 )
1633 )
1634
1634
1635 arraytext, deltatext = m.fastdelta(self.fulltextcache[p1], work)
1635 arraytext, deltatext = m.fastdelta(self.fulltextcache[p1], work)
1636 cachedelta = self._revlog.rev(p1), deltatext
1636 cachedelta = self._revlog.rev(p1), deltatext
1637 text = util.buffer(arraytext)
1637 text = util.buffer(arraytext)
1638 n = self._revlog.addrevision(
1638 n = self._revlog.addrevision(
1639 text, transaction, link, p1, p2, cachedelta
1639 text, transaction, link, p1, p2, cachedelta
1640 )
1640 )
1641 else:
1641 else:
1642 # The first parent manifest isn't already loaded, so we'll
1642 # The first parent manifest isn't already loaded, so we'll
1643 # just encode a fulltext of the manifest and pass that
1643 # just encode a fulltext of the manifest and pass that
1644 # through to the revlog layer, and let it handle the delta
1644 # through to the revlog layer, and let it handle the delta
1645 # process.
1645 # process.
1646 if self._treeondisk:
1646 if self._treeondisk:
1647 assert readtree, b"readtree must be set for treemanifest writes"
1647 assert readtree, b"readtree must be set for treemanifest writes"
1648 assert match, b"match must be specified for treemanifest writes"
1648 assert match, b"match must be specified for treemanifest writes"
1649 m1 = readtree(self.tree, p1)
1649 m1 = readtree(self.tree, p1)
1650 m2 = readtree(self.tree, p2)
1650 m2 = readtree(self.tree, p2)
1651 n = self._addtree(
1651 n = self._addtree(
1652 m, transaction, link, m1, m2, readtree, match=match
1652 m, transaction, link, m1, m2, readtree, match=match
1653 )
1653 )
1654 arraytext = None
1654 arraytext = None
1655 else:
1655 else:
1656 text = m.text()
1656 text = m.text()
1657 n = self._revlog.addrevision(text, transaction, link, p1, p2)
1657 n = self._revlog.addrevision(text, transaction, link, p1, p2)
1658 arraytext = bytearray(text)
1658 arraytext = bytearray(text)
1659
1659
1660 if arraytext is not None:
1660 if arraytext is not None:
1661 self.fulltextcache[n] = arraytext
1661 self.fulltextcache[n] = arraytext
1662
1662
1663 return n
1663 return n
1664
1664
    def _addtree(self, m, transaction, link, m1, m2, readtree, match):
        """Write tree manifest ``m`` and its subtrees; return its node.

        ``m1``/``m2`` are the parent tree manifests. When the content is
        provably unchanged relative to a parent, that parent's node is
        reused instead of writing a new revision.
        """
        # If the manifest is unchanged compared to one parent,
        # don't write a new revision
        if self.tree != b'' and (
            m.unmodifiedsince(m1) or m.unmodifiedsince(m2)
        ):
            return m.node()

        def writesubtree(subm, subp1, subp2, match):
            # recurse: each child directory is added on its own dirlog
            sublog = self.dirlog(subm.dir())
            sublog.add(
                subm,
                transaction,
                link,
                subp1,
                subp2,
                None,
                None,
                readtree=readtree,
                match=match,
            )

        m.writesubtrees(m1, m2, writesubtree, match)
        text = m.dirtext()
        n = None
        if self.tree != b'':
            # Double-check whether contents are unchanged to one parent
            if text == m1.dirtext():
                n = m1.node()
            elif text == m2.dirtext():
                n = m2.node()

        if not n:
            n = self._revlog.addrevision(
                text, transaction, link, m1.node(), m2.node()
            )

        # Save nodeid so parent manifest can calculate its nodeid
        m.setnode(n)
        return n
1705
1705
1706 def __len__(self):
1706 def __len__(self):
1707 return len(self._revlog)
1707 return len(self._revlog)
1708
1708
1709 def __iter__(self):
1709 def __iter__(self):
1710 return self._revlog.__iter__()
1710 return self._revlog.__iter__()
1711
1711
1712 def rev(self, node):
1712 def rev(self, node):
1713 return self._revlog.rev(node)
1713 return self._revlog.rev(node)
1714
1714
1715 def node(self, rev):
1715 def node(self, rev):
1716 return self._revlog.node(rev)
1716 return self._revlog.node(rev)
1717
1717
1718 def lookup(self, value):
1718 def lookup(self, value):
1719 return self._revlog.lookup(value)
1719 return self._revlog.lookup(value)
1720
1720
1721 def parentrevs(self, rev):
1721 def parentrevs(self, rev):
1722 return self._revlog.parentrevs(rev)
1722 return self._revlog.parentrevs(rev)
1723
1723
1724 def parents(self, node):
1724 def parents(self, node):
1725 return self._revlog.parents(node)
1725 return self._revlog.parents(node)
1726
1726
1727 def linkrev(self, rev):
1727 def linkrev(self, rev):
1728 return self._revlog.linkrev(rev)
1728 return self._revlog.linkrev(rev)
1729
1729
1730 def checksize(self):
1730 def checksize(self):
1731 return self._revlog.checksize()
1731 return self._revlog.checksize()
1732
1732
1733 def revision(self, node, _df=None, raw=False):
1733 def revision(self, node, _df=None, raw=False):
1734 return self._revlog.revision(node, _df=_df, raw=raw)
1734 return self._revlog.revision(node, _df=_df, raw=raw)
1735
1735
1736 def rawdata(self, node, _df=None):
1736 def rawdata(self, node, _df=None):
1737 return self._revlog.rawdata(node, _df=_df)
1737 return self._revlog.rawdata(node, _df=_df)
1738
1738
1739 def revdiff(self, rev1, rev2):
1739 def revdiff(self, rev1, rev2):
1740 return self._revlog.revdiff(rev1, rev2)
1740 return self._revlog.revdiff(rev1, rev2)
1741
1741
    def cmp(self, node, text):
        """Compare *text* against the stored revision *node* (True if different)."""
        return self._revlog.cmp(node, text)
    def deltaparent(self, rev):
        """Return the revision this revision's stored delta is based on."""
        return self._revlog.deltaparent(rev)
    def emitrevisions(
        self,
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
        deltamode=repository.CG_DELTAMODE_STD,
    ):
        """Produce revision delta/data objects for *nodes*.

        All keyword options are forwarded to the underlying revlog
        unchanged.
        """
        return self._revlog.emitrevisions(
            nodes,
            nodesorder=nodesorder,
            revisiondata=revisiondata,
            assumehaveparentrevisions=assumehaveparentrevisions,
            deltamode=deltamode,
        )
    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
        """Apply a group of incoming *deltas* to the revlog.

        ``linkmapper`` translates link nodes to changelog revs;
        ``addrevisioncb`` is invoked for every revision added.
        """
        return self._revlog.addgroup(
            deltas, linkmapper, transaction, addrevisioncb=addrevisioncb
        )
    def rawsize(self, rev):
        """Return the size of the raw stored data for revision *rev*."""
        return self._revlog.rawsize(rev)
    def getstrippoint(self, minlink):
        """Return where stripping should begin for changelog rev *minlink*."""
        return self._revlog.getstrippoint(minlink)
    def strip(self, minlink, transaction):
        """Remove revisions linked to changelog revs >= *minlink*."""
        return self._revlog.strip(minlink, transaction)
    def files(self):
        """Return the list of files backing this revlog's storage."""
        return self._revlog.files()
    def clone(self, tr, destrevlog, **kwargs):
        """Copy this manifest revlog's data into *destrevlog*.

        Only another manifestrevlog is an acceptable destination; the raw
        copy itself is delegated to the wrapped revlog.
        """
        if not isinstance(destrevlog, manifestrevlog):
            raise error.ProgrammingError(b'expected manifestrevlog to clone()')

        return self._revlog.clone(tr, destrevlog._revlog, **kwargs)
    def storageinfo(
        self,
        exclusivefiles=False,
        sharedfiles=False,
        revisionscount=False,
        trackedsize=False,
        storedsize=False,
    ):
        """Obtain storage metadata; each boolean flag requests one facet.

        All flags are forwarded to the underlying revlog unchanged.
        """
        return self._revlog.storageinfo(
            exclusivefiles=exclusivefiles,
            sharedfiles=sharedfiles,
            revisionscount=revisionscount,
            trackedsize=trackedsize,
            storedsize=storedsize,
        )
    @property
    def indexfile(self):
        # Expose the wrapped revlog's index file path as our own.
        return self._revlog.indexfile

    @indexfile.setter
    def indexfile(self, value):
        # Writes pass straight through to the wrapped revlog.
        self._revlog.indexfile = value
    @property
    def opener(self):
        # Expose the wrapped revlog's vfs/opener as our own.
        return self._revlog.opener

    @opener.setter
    def opener(self, value):
        # Writes pass straight through to the wrapped revlog.
        self._revlog.opener = value
1819
1819
@interfaceutil.implementer(repository.imanifestlog)
class manifestlog(object):
    """A collection class representing the collection of manifest snapshots
    referenced by commits in the repository.

    In this situation, 'manifest' refers to the abstract concept of a snapshot
    of the list of files in the given commit. Consumers of the output of this
    class do not care about the implementation details of the actual manifests
    they receive (i.e. tree or flat or lazily loaded, etc)."""

    def __init__(self, opener, repo, rootstore, narrowmatch):
        # Defaults, possibly overridden by the opener's option dict below.
        usetreemanifest = False
        cachesize = 4

        opts = getattr(opener, 'options', None)
        if opts is not None:
            usetreemanifest = opts.get(b'treemanifest', usetreemanifest)
            cachesize = opts.get(b'manifestcachesize', cachesize)

        self._treemanifests = usetreemanifest

        self._rootstore = rootstore
        self._rootstore._setupmanifestcachehooks(repo)
        self._narrowmatch = narrowmatch

        # A cache of the manifestctx or treemanifestctx for each directory
        self._dirmancache = {}
        self._dirmancache[b''] = util.lrucachedict(cachesize)

        self._cachesize = cachesize

    def __getitem__(self, node):
        """Retrieves the manifest instance for the given node. Throws a
        LookupError if not found.
        """
        return self.get(b'', node)

    def get(self, tree, node, verify=True):
        """Retrieves the manifest instance for the given node. Throws a
        LookupError if not found.

        `verify` - if True an exception will be thrown if the node is not in
                   the revlog
        """
        if node in self._dirmancache.get(tree, ()):
            return self._dirmancache[tree][node]

        if not self._narrowmatch.always():
            # Directories outside the narrowspec get an opaque stand-in.
            if not self._narrowmatch.visitdir(tree[:-1]):
                return excludeddirmanifestctx(tree, node)
        if tree:
            if self._rootstore._treeondisk:
                if verify:
                    # Side-effect is LookupError is raised if node doesn't
                    # exist.
                    self.getstorage(tree).rev(node)

                m = treemanifestctx(self, tree, node)
            else:
                raise error.Abort(
                    _(
                        b"cannot ask for manifest directory '%s' in a flat "
                        b"manifest"
                    )
                    % tree
                )
        else:
            if verify:
                # Side-effect is LookupError is raised if node doesn't exist.
                self._rootstore.rev(node)

            if self._treemanifests:
                m = treemanifestctx(self, b'', node)
            else:
                m = manifestctx(self, node)

        if node != nullid:
            # Cache every context except the null revision's, lazily
            # creating the per-directory LRU cache on first use.
            mancache = self._dirmancache.get(tree)
            if not mancache:
                mancache = util.lrucachedict(self._cachesize)
                self._dirmancache[tree] = mancache
            mancache[node] = m
        return m

    def getstorage(self, tree):
        """Return the storage object (directory revlog) for *tree*."""
        return self._rootstore.dirlog(tree)

    def clearcaches(self, clear_persisted_data=False):
        """Drop the in-memory ctx cache and the root store's caches."""
        self._dirmancache.clear()
        self._rootstore.clearcaches(clear_persisted_data=clear_persisted_data)

    def rev(self, node):
        """Return the root store revision number for *node*."""
        return self._rootstore.rev(node)
1913
1914
1914
@interfaceutil.implementer(repository.imanifestrevisionwritable)
class memmanifestctx(object):
    """A mutable, in-memory flat manifest revision not yet committed.

    ``read()`` exposes the live manifestdict; ``write()`` persists it
    through the root manifest storage and returns the new node.
    """

    def __init__(self, manifestlog):
        self._manifestlog = manifestlog
        self._manifestdict = manifestdict()

    def _storage(self):
        # Flat manifests always live in the root (b'') storage.
        return self._manifestlog.getstorage(b'')

    def read(self):
        # Hand out the live dict; callers mutate it before write().
        return self._manifestdict

    def copy(self):
        # Build a sibling context backed by a copy of our entries.
        duplicate = memmanifestctx(self._manifestlog)
        duplicate._manifestdict = self.read().copy()
        return duplicate

    def write(self, transaction, link, p1, p2, added, removed, match=None):
        # Persist the accumulated manifest and return the new node id.
        store = self._storage()
        return store.add(
            self._manifestdict,
            transaction,
            link,
            p1,
            p2,
            added,
            removed,
            match=match,
        )
@interfaceutil.implementer(repository.imanifestrevisionstored)
class manifestctx(object):
    """A class representing a single revision of a manifest, including its
    contents, its parent revs, and its linkrev.
    """

    def __init__(self, manifestlog, node):
        self._manifestlog = manifestlog
        # Parsed manifestdict, populated lazily by read().
        self._data = None

        self._node = node

        # TODO: We eventually want p1, p2, and linkrev exposed on this class,
        # but let's add it later when something needs it and we can load it
        # lazily.
        # self.p1, self.p2 = store.parents(node)
        # rev = store.rev(node)
        # self.linkrev = store.linkrev(rev)

    def _storage(self):
        # Flat manifests always live in the root (b'') storage.
        return self._manifestlog.getstorage(b'')

    def node(self):
        """Return the node id this context represents."""
        return self._node

    def copy(self):
        """Return a writable in-memory copy of this manifest revision."""
        memmf = memmanifestctx(self._manifestlog)
        memmf._manifestdict = self.read().copy()
        return memmf

    @propertycache
    def parents(self):
        # Pair of parent node ids, loaded once from storage.
        return self._storage().parents(self._node)

    def read(self):
        """Return the parsed manifestdict, loading and caching it on demand."""
        if self._data is None:
            if self._node == nullid:
                # The null revision is an empty manifest.
                self._data = manifestdict()
            else:
                store = self._storage()
                if self._node in store.fulltextcache:
                    text = pycompat.bytestr(store.fulltextcache[self._node])
                else:
                    text = store.revision(self._node)
                    # Warm the fulltext cache for later readers.
                    arraytext = bytearray(text)
                    store.fulltextcache[self._node] = arraytext
                self._data = manifestdict(text)
        return self._data

    def readfast(self, shallow=False):
        '''Calls either readdelta or read, based on which would be less work.
        readdelta is called if the delta is against the p1, and therefore can be
        read quickly.

        If `shallow` is True, nothing changes since this is a flat manifest.
        '''
        store = self._storage()
        r = store.rev(self._node)
        deltaparent = store.deltaparent(r)
        if deltaparent != nullrev and deltaparent in store.parentrevs(r):
            return self.readdelta()
        return self.read()

    def readdelta(self, shallow=False):
        '''Returns a manifest containing just the entries that are present
        in this manifest, but not in its p1 manifest. This is efficient to read
        if the revlog delta is already p1.

        Changing the value of `shallow` has no effect on flat manifests.
        '''
        store = self._storage()
        r = store.rev(self._node)
        d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
        return manifestdict(d)

    def find(self, key):
        """Return the (node, flags) entry for path *key*."""
        return self.read().find(key)
2023
@interfaceutil.implementer(repository.imanifestrevisionwritable)
class memtreemanifestctx(object):
    """A mutable, in-memory tree manifest revision not yet committed."""

    def __init__(self, manifestlog, dir=b''):
        self._manifestlog = manifestlog
        self._dir = dir
        self._treemanifest = treemanifest()

    def _storage(self):
        # Writes go through the root (b'') storage; subtrees are handled
        # by the storage's add() via the readtree callback below.
        return self._manifestlog.getstorage(b'')

    def copy(self):
        # Build a sibling context backed by a copy of our tree.
        memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
        memmf._treemanifest = self._treemanifest.copy()
        return memmf

    def read(self):
        # Hand out the live tree; callers mutate it before write().
        return self._treemanifest

    def write(self, transaction, link, p1, p2, added, removed, match=None):
        """Persist the accumulated tree manifest and return the new node."""

        def readtree(dir, node):
            # Used by storage.add() to load existing subtrees for diffing.
            return self._manifestlog.get(dir, node).read()

        return self._storage().add(
            self._treemanifest,
            transaction,
            link,
            p1,
            p2,
            added,
            removed,
            readtree=readtree,
            match=match,
        )
2058
@interfaceutil.implementer(repository.imanifestrevisionstored)
class treemanifestctx(object):
    """A single stored revision of a (sub)tree manifest for directory
    ``dir`` at node ``node``."""

    def __init__(self, manifestlog, dir, node):
        self._manifestlog = manifestlog
        self._dir = dir
        # Parsed treemanifest, populated lazily by read().
        self._data = None

        self._node = node

        # TODO: Load p1/p2/linkrev lazily. They need to be lazily loaded so that
        # we can instantiate treemanifestctx objects for directories we don't
        # have on disk.
        # self.p1, self.p2 = store.parents(node)
        # rev = store.rev(node)
        # self.linkrev = store.linkrev(rev)

    def _storage(self):
        # Directories outside the narrowspec get a write-refusing stand-in
        # revlog instead of real storage.
        narrowmatch = self._manifestlog._narrowmatch
        if not narrowmatch.always():
            if not narrowmatch.visitdir(self._dir[:-1]):
                return excludedmanifestrevlog(self._dir)
        return self._manifestlog.getstorage(self._dir)

    def read(self):
        """Return the parsed treemanifest, loading and caching it on demand."""
        if self._data is None:
            store = self._storage()
            if self._node == nullid:
                # The null revision is an empty tree.
                self._data = treemanifest()
            # TODO accessing non-public API
            elif store._treeondisk:
                m = treemanifest(dir=self._dir)

                def gettext():
                    return store.revision(self._node)

                def readsubtree(dir, subm):
                    # Set verify to False since we need to be able to create
                    # subtrees for trees that don't exist on disk.
                    return self._manifestlog.get(dir, subm, verify=False).read()

                m.read(gettext, readsubtree)
                m.setnode(self._node)
                self._data = m
            else:
                # Tree stored as a flat text: parse it directly, warming the
                # fulltext cache when we had to hit the revlog.
                if self._node in store.fulltextcache:
                    text = pycompat.bytestr(store.fulltextcache[self._node])
                else:
                    text = store.revision(self._node)
                    arraytext = bytearray(text)
                    store.fulltextcache[self._node] = arraytext
                self._data = treemanifest(dir=self._dir, text=text)

        return self._data

    def node(self):
        """Return the node id this context represents."""
        return self._node

    def copy(self):
        """Return a writable in-memory copy of this tree manifest revision."""
        memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
        memmf._treemanifest = self.read().copy()
        return memmf

    @propertycache
    def parents(self):
        # Pair of parent node ids, loaded once from storage.
        return self._storage().parents(self._node)

    def readdelta(self, shallow=False):
        '''Returns a manifest containing just the entries that are present
        in this manifest, but not in its p1 manifest. This is efficient to read
        if the revlog delta is already p1.

        If `shallow` is True, this will read the delta for this directory,
        without recursively reading subdirectory manifests. Instead, any
        subdirectory entry will be reported as it appears in the manifest, i.e.
        the subdirectory will be reported among files and distinguished only by
        its 't' flag.
        '''
        store = self._storage()
        if shallow:
            r = store.rev(self._node)
            d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
            return manifestdict(d)
        else:
            # Need to perform a slow delta
            r0 = store.deltaparent(store.rev(self._node))
            m0 = self._manifestlog.get(self._dir, store.node(r0)).read()
            m1 = self.read()
            md = treemanifest(dir=self._dir)
            for f, ((n0, fl0), (n1, fl1)) in pycompat.iteritems(m0.diff(m1)):
                if n1:
                    md[f] = n1
                    if fl1:
                        md.setflag(f, fl1)
            return md

    def readfast(self, shallow=False):
        '''Calls either readdelta or read, based on which would be less work.
        readdelta is called if the delta is against the p1, and therefore can be
        read quickly.

        If `shallow` is True, it only returns the entries from this manifest,
        and not any submanifests.
        '''
        store = self._storage()
        r = store.rev(self._node)
        deltaparent = store.deltaparent(r)
        if deltaparent != nullrev and deltaparent in store.parentrevs(r):
            return self.readdelta(shallow=shallow)

        if shallow:
            return manifestdict(store.revision(self._node))
        else:
            return self.read()

    def find(self, key):
        """Return the (node, flags) entry for path *key*."""
        return self.read().find(key)
2188
2176
class excludeddir(treemanifest):
    """Stand-in for a directory that is excluded from the repository.

    With narrowing active on a repository that uses treemanifests,
    some of the directory revlogs will be excluded from the resulting
    clone. This is a huge storage win for clients, but means we need
    some sort of pseudo-manifest to surface to internals so we can
    detect a merge conflict outside the narrowspec. That's what this
    class is: it stands in for a directory whose node is known, but
    whose contents are unknown.
    """

    def __init__(self, dir, node):
        super(excludeddir, self).__init__(dir)
        self._node = node
        # Add an empty file, which will be included by iterators and such,
        # appearing as the directory itself (i.e. something like "dir/")
        self._files[b''] = node
        self._flags[b''] = b't'

    # Manifests outside the narrowspec should never be modified, so avoid
    # copying. This makes a noticeable difference when there are very many
    # directories outside the narrowspec. Also, it makes sense for the copy to
    # be of the same type as the original, which would not happen with the
    # super type's copy().
    def copy(self):
        return self
2217
2205
class excludeddirmanifestctx(treemanifestctx):
    """context wrapper for excludeddir - see that docstring for rationale"""

    def __init__(self, dir, node):
        # Deliberately skip treemanifestctx.__init__: there is no backing
        # manifestlog for an excluded directory, only a path and a node.
        self._dir = dir
        self._node = node

    def read(self):
        # Materialize the opaque pseudo-manifest on every read.
        return excludeddir(self._dir, self._node)

    def write(self, *args):
        # Writing outside the narrowspec is always a programming error.
        msg = b'attempt to write manifest from excluded dir %s' % self._dir
        raise error.ProgrammingError(msg)
2233
2221
class excludedmanifestrevlog(manifestrevlog):
    """Stand-in for excluded treemanifest revlogs.

    When narrowing is active on a treemanifest repository, we'll have
    references to directories we can't see due to the revlog being
    skipped. This class exists to conform to the manifestrevlog
    interface for those directories and proactively prevent writes to
    outside the narrowspec.
    """

    def __init__(self, dir):
        # Deliberately skip manifestrevlog.__init__: there is no backing
        # storage for an excluded directory, only its path.
        self._dir = dir

    def __len__(self):
        raise error.ProgrammingError(
            b'attempt to get length of excluded dir %s' % self._dir
        )

    def rev(self, node):
        raise error.ProgrammingError(
            b'attempt to get rev from excluded dir %s' % self._dir
        )

    def linkrev(self, node):
        raise error.ProgrammingError(
            b'attempt to get linkrev from excluded dir %s' % self._dir
        )

    def node(self, rev):
        raise error.ProgrammingError(
            b'attempt to get node from excluded dir %s' % self._dir
        )

    def add(self, *args, **kwargs):
        # We should never write entries in dirlogs outside the narrow clone.
        # However, the method still gets called from writesubtree() in
        # _addtree(), so we need to handle it. We should possibly make that
        # avoid calling add() with a clean manifest (_dirty is always False
        # in excludeddir instances).
        pass
@@ -1,283 +1,282
1 # Test that certain objects conform to well-defined interfaces.
1 # Test that certain objects conform to well-defined interfaces.
2
2
3 from __future__ import absolute_import, print_function
3 from __future__ import absolute_import, print_function
4
4
5 from mercurial import encoding
5 from mercurial import encoding
6
6
7 encoding.environ[b'HGREALINTERFACES'] = b'1'
7 encoding.environ[b'HGREALINTERFACES'] = b'1'
8
8
9 import os
9 import os
10 import subprocess
10 import subprocess
11 import sys
11 import sys
12
12
13 # Only run if tests are run in a repo
13 # Only run if tests are run in a repo
14 if subprocess.call(
14 if subprocess.call(
15 ['python', '%s/hghave' % os.environ['TESTDIR'], 'test-repo']
15 ['python', '%s/hghave' % os.environ['TESTDIR'], 'test-repo']
16 ):
16 ):
17 sys.exit(80)
17 sys.exit(80)
18
18
19 from mercurial.interfaces import (
19 from mercurial.interfaces import (
20 dirstate as intdirstate,
20 dirstate as intdirstate,
21 repository,
21 repository,
22 )
22 )
23 from mercurial.thirdparty.zope import interface as zi
23 from mercurial.thirdparty.zope import interface as zi
24 from mercurial.thirdparty.zope.interface import verify as ziverify
24 from mercurial.thirdparty.zope.interface import verify as ziverify
25 from mercurial import (
25 from mercurial import (
26 bundlerepo,
26 bundlerepo,
27 dirstate,
27 dirstate,
28 filelog,
28 filelog,
29 httppeer,
29 httppeer,
30 localrepo,
30 localrepo,
31 manifest,
31 manifest,
32 pycompat,
32 pycompat,
33 revlog,
33 revlog,
34 sshpeer,
34 sshpeer,
35 statichttprepo,
35 statichttprepo,
36 ui as uimod,
36 ui as uimod,
37 unionrepo,
37 unionrepo,
38 vfs as vfsmod,
38 vfs as vfsmod,
39 wireprotoserver,
39 wireprotoserver,
40 wireprototypes,
40 wireprototypes,
41 wireprotov1peer,
41 wireprotov1peer,
42 wireprotov2server,
42 wireprotov2server,
43 )
43 )
44
44
45 testdir = os.path.dirname(__file__)
45 testdir = os.path.dirname(__file__)
46 rootdir = pycompat.fsencode(os.path.normpath(os.path.join(testdir, '..')))
46 rootdir = pycompat.fsencode(os.path.normpath(os.path.join(testdir, '..')))
47
47
48 sys.path[0:0] = [testdir]
48 sys.path[0:0] = [testdir]
49 import simplestorerepo
49 import simplestorerepo
50
50
51 del sys.path[0]
51 del sys.path[0]
52
52
53
53
54 def checkzobject(o, allowextra=False):
54 def checkzobject(o, allowextra=False):
55 """Verify an object with a zope interface."""
55 """Verify an object with a zope interface."""
56 ifaces = zi.providedBy(o)
56 ifaces = zi.providedBy(o)
57 if not ifaces:
57 if not ifaces:
58 print('%r does not provide any zope interfaces' % o)
58 print('%r does not provide any zope interfaces' % o)
59 return
59 return
60
60
61 # Run zope.interface's built-in verification routine. This verifies that
61 # Run zope.interface's built-in verification routine. This verifies that
62 # everything that is supposed to be present is present.
62 # everything that is supposed to be present is present.
63 for iface in ifaces:
63 for iface in ifaces:
64 ziverify.verifyObject(iface, o)
64 ziverify.verifyObject(iface, o)
65
65
66 if allowextra:
66 if allowextra:
67 return
67 return
68
68
69 # Now verify that the object provides no extra public attributes that
69 # Now verify that the object provides no extra public attributes that
70 # aren't declared as part of interfaces.
70 # aren't declared as part of interfaces.
71 allowed = set()
71 allowed = set()
72 for iface in ifaces:
72 for iface in ifaces:
73 allowed |= set(iface.names(all=True))
73 allowed |= set(iface.names(all=True))
74
74
75 public = {a for a in dir(o) if not a.startswith('_')}
75 public = {a for a in dir(o) if not a.startswith('_')}
76
76
77 for attr in sorted(public - allowed):
77 for attr in sorted(public - allowed):
78 print(
78 print(
79 'public attribute not declared in interfaces: %s.%s'
79 'public attribute not declared in interfaces: %s.%s'
80 % (o.__class__.__name__, attr)
80 % (o.__class__.__name__, attr)
81 )
81 )
82
82
83
83
84 # Facilitates testing localpeer.
84 # Facilitates testing localpeer.
85 class dummyrepo(object):
85 class dummyrepo(object):
86 def __init__(self):
86 def __init__(self):
87 self.ui = uimod.ui()
87 self.ui = uimod.ui()
88
88
89 def filtered(self, name):
89 def filtered(self, name):
90 pass
90 pass
91
91
92 def _restrictcapabilities(self, caps):
92 def _restrictcapabilities(self, caps):
93 pass
93 pass
94
94
95
95
96 class dummyopener(object):
96 class dummyopener(object):
97 handlers = []
97 handlers = []
98
98
99
99
100 # Facilitates testing sshpeer without requiring a server.
100 # Facilitates testing sshpeer without requiring a server.
101 class badpeer(httppeer.httppeer):
101 class badpeer(httppeer.httppeer):
102 def __init__(self):
102 def __init__(self):
103 super(badpeer, self).__init__(
103 super(badpeer, self).__init__(
104 None, None, None, dummyopener(), None, None
104 None, None, None, dummyopener(), None, None
105 )
105 )
106 self.badattribute = True
106 self.badattribute = True
107
107
108 def badmethod(self):
108 def badmethod(self):
109 pass
109 pass
110
110
111
111
112 class dummypipe(object):
112 class dummypipe(object):
113 def close(self):
113 def close(self):
114 pass
114 pass
115
115
116
116
117 def main():
117 def main():
118 ui = uimod.ui()
118 ui = uimod.ui()
119 # Needed so we can open a local repo with obsstore without a warning.
119 # Needed so we can open a local repo with obsstore without a warning.
120 ui.setconfig(b'experimental', b'evolution.createmarkers', True)
120 ui.setconfig(b'experimental', b'evolution.createmarkers', True)
121
121
122 checkzobject(badpeer())
122 checkzobject(badpeer())
123
123
124 ziverify.verifyClass(repository.ipeerbase, httppeer.httppeer)
124 ziverify.verifyClass(repository.ipeerbase, httppeer.httppeer)
125 checkzobject(httppeer.httppeer(None, None, None, dummyopener(), None, None))
125 checkzobject(httppeer.httppeer(None, None, None, dummyopener(), None, None))
126
126
127 ziverify.verifyClass(repository.ipeerv2, httppeer.httpv2peer)
127 ziverify.verifyClass(repository.ipeerv2, httppeer.httpv2peer)
128 checkzobject(httppeer.httpv2peer(None, b'', b'', None, None, None))
128 checkzobject(httppeer.httpv2peer(None, b'', b'', None, None, None))
129
129
130 ziverify.verifyClass(repository.ipeerbase, localrepo.localpeer)
130 ziverify.verifyClass(repository.ipeerbase, localrepo.localpeer)
131 checkzobject(localrepo.localpeer(dummyrepo()))
131 checkzobject(localrepo.localpeer(dummyrepo()))
132
132
133 ziverify.verifyClass(
133 ziverify.verifyClass(
134 repository.ipeercommandexecutor, localrepo.localcommandexecutor
134 repository.ipeercommandexecutor, localrepo.localcommandexecutor
135 )
135 )
136 checkzobject(localrepo.localcommandexecutor(None))
136 checkzobject(localrepo.localcommandexecutor(None))
137
137
138 ziverify.verifyClass(
138 ziverify.verifyClass(
139 repository.ipeercommandexecutor, wireprotov1peer.peerexecutor
139 repository.ipeercommandexecutor, wireprotov1peer.peerexecutor
140 )
140 )
141 checkzobject(wireprotov1peer.peerexecutor(None))
141 checkzobject(wireprotov1peer.peerexecutor(None))
142
142
143 ziverify.verifyClass(repository.ipeerbase, sshpeer.sshv1peer)
143 ziverify.verifyClass(repository.ipeerbase, sshpeer.sshv1peer)
144 checkzobject(
144 checkzobject(
145 sshpeer.sshv1peer(
145 sshpeer.sshv1peer(
146 ui,
146 ui,
147 b'ssh://localhost/foo',
147 b'ssh://localhost/foo',
148 b'',
148 b'',
149 dummypipe(),
149 dummypipe(),
150 dummypipe(),
150 dummypipe(),
151 None,
151 None,
152 None,
152 None,
153 )
153 )
154 )
154 )
155
155
156 ziverify.verifyClass(repository.ipeerbase, sshpeer.sshv2peer)
156 ziverify.verifyClass(repository.ipeerbase, sshpeer.sshv2peer)
157 checkzobject(
157 checkzobject(
158 sshpeer.sshv2peer(
158 sshpeer.sshv2peer(
159 ui,
159 ui,
160 b'ssh://localhost/foo',
160 b'ssh://localhost/foo',
161 b'',
161 b'',
162 dummypipe(),
162 dummypipe(),
163 dummypipe(),
163 dummypipe(),
164 None,
164 None,
165 None,
165 None,
166 )
166 )
167 )
167 )
168
168
169 ziverify.verifyClass(repository.ipeerbase, bundlerepo.bundlepeer)
169 ziverify.verifyClass(repository.ipeerbase, bundlerepo.bundlepeer)
170 checkzobject(bundlerepo.bundlepeer(dummyrepo()))
170 checkzobject(bundlerepo.bundlepeer(dummyrepo()))
171
171
172 ziverify.verifyClass(repository.ipeerbase, statichttprepo.statichttppeer)
172 ziverify.verifyClass(repository.ipeerbase, statichttprepo.statichttppeer)
173 checkzobject(statichttprepo.statichttppeer(dummyrepo()))
173 checkzobject(statichttprepo.statichttppeer(dummyrepo()))
174
174
175 ziverify.verifyClass(repository.ipeerbase, unionrepo.unionpeer)
175 ziverify.verifyClass(repository.ipeerbase, unionrepo.unionpeer)
176 checkzobject(unionrepo.unionpeer(dummyrepo()))
176 checkzobject(unionrepo.unionpeer(dummyrepo()))
177
177
178 ziverify.verifyClass(
178 ziverify.verifyClass(
179 repository.ilocalrepositorymain, localrepo.localrepository
179 repository.ilocalrepositorymain, localrepo.localrepository
180 )
180 )
181 ziverify.verifyClass(
181 ziverify.verifyClass(
182 repository.ilocalrepositoryfilestorage, localrepo.revlogfilestorage
182 repository.ilocalrepositoryfilestorage, localrepo.revlogfilestorage
183 )
183 )
184 repo = localrepo.makelocalrepository(ui, rootdir)
184 repo = localrepo.makelocalrepository(ui, rootdir)
185 checkzobject(repo)
185 checkzobject(repo)
186
186
187 ziverify.verifyClass(
187 ziverify.verifyClass(
188 wireprototypes.baseprotocolhandler, wireprotoserver.sshv1protocolhandler
188 wireprototypes.baseprotocolhandler, wireprotoserver.sshv1protocolhandler
189 )
189 )
190 ziverify.verifyClass(
190 ziverify.verifyClass(
191 wireprototypes.baseprotocolhandler, wireprotoserver.sshv2protocolhandler
191 wireprototypes.baseprotocolhandler, wireprotoserver.sshv2protocolhandler
192 )
192 )
193 ziverify.verifyClass(
193 ziverify.verifyClass(
194 wireprototypes.baseprotocolhandler,
194 wireprototypes.baseprotocolhandler,
195 wireprotoserver.httpv1protocolhandler,
195 wireprotoserver.httpv1protocolhandler,
196 )
196 )
197 ziverify.verifyClass(
197 ziverify.verifyClass(
198 wireprototypes.baseprotocolhandler,
198 wireprototypes.baseprotocolhandler,
199 wireprotov2server.httpv2protocolhandler,
199 wireprotov2server.httpv2protocolhandler,
200 )
200 )
201
201
202 sshv1 = wireprotoserver.sshv1protocolhandler(None, None, None)
202 sshv1 = wireprotoserver.sshv1protocolhandler(None, None, None)
203 checkzobject(sshv1)
203 checkzobject(sshv1)
204 sshv2 = wireprotoserver.sshv2protocolhandler(None, None, None)
204 sshv2 = wireprotoserver.sshv2protocolhandler(None, None, None)
205 checkzobject(sshv2)
205 checkzobject(sshv2)
206
206
207 httpv1 = wireprotoserver.httpv1protocolhandler(None, None, None)
207 httpv1 = wireprotoserver.httpv1protocolhandler(None, None, None)
208 checkzobject(httpv1)
208 checkzobject(httpv1)
209 httpv2 = wireprotov2server.httpv2protocolhandler(None, None)
209 httpv2 = wireprotov2server.httpv2protocolhandler(None, None)
210 checkzobject(httpv2)
210 checkzobject(httpv2)
211
211
212 ziverify.verifyClass(repository.ifilestorage, filelog.filelog)
212 ziverify.verifyClass(repository.ifilestorage, filelog.filelog)
213 ziverify.verifyClass(repository.imanifestdict, manifest.manifestdict)
213 ziverify.verifyClass(repository.imanifestdict, manifest.manifestdict)
214 ziverify.verifyClass(
214 ziverify.verifyClass(
215 repository.imanifestrevisionstored, manifest.manifestctx
215 repository.imanifestrevisionstored, manifest.manifestctx
216 )
216 )
217 ziverify.verifyClass(
217 ziverify.verifyClass(
218 repository.imanifestrevisionwritable, manifest.memmanifestctx
218 repository.imanifestrevisionwritable, manifest.memmanifestctx
219 )
219 )
220 ziverify.verifyClass(
220 ziverify.verifyClass(
221 repository.imanifestrevisionstored, manifest.treemanifestctx
221 repository.imanifestrevisionstored, manifest.treemanifestctx
222 )
222 )
223 ziverify.verifyClass(
223 ziverify.verifyClass(
224 repository.imanifestrevisionwritable, manifest.memtreemanifestctx
224 repository.imanifestrevisionwritable, manifest.memtreemanifestctx
225 )
225 )
226 ziverify.verifyClass(repository.imanifestlog, manifest.manifestlog)
226 ziverify.verifyClass(repository.imanifestlog, manifest.manifestlog)
227 ziverify.verifyClass(repository.imanifeststorage, manifest.manifestrevlog)
227 ziverify.verifyClass(repository.imanifeststorage, manifest.manifestrevlog)
228
228
229 ziverify.verifyClass(
229 ziverify.verifyClass(
230 repository.irevisiondelta, simplestorerepo.simplestorerevisiondelta
230 repository.irevisiondelta, simplestorerepo.simplestorerevisiondelta
231 )
231 )
232 ziverify.verifyClass(repository.ifilestorage, simplestorerepo.filestorage)
232 ziverify.verifyClass(repository.ifilestorage, simplestorerepo.filestorage)
233 ziverify.verifyClass(
233 ziverify.verifyClass(
234 repository.iverifyproblem, simplestorerepo.simplefilestoreproblem
234 repository.iverifyproblem, simplestorerepo.simplefilestoreproblem
235 )
235 )
236
236
237 ziverify.verifyClass(intdirstate.idirstate, dirstate.dirstate)
237 ziverify.verifyClass(intdirstate.idirstate, dirstate.dirstate)
238
238
239 vfs = vfsmod.vfs(b'.')
239 vfs = vfsmod.vfs(b'.')
240 fl = filelog.filelog(vfs, b'dummy.i')
240 fl = filelog.filelog(vfs, b'dummy.i')
241 checkzobject(fl, allowextra=True)
241 checkzobject(fl, allowextra=True)
242
242
243 # Conforms to imanifestlog.
243 # Conforms to imanifestlog.
244 ml = manifest.manifestlog(
244 ml = manifest.manifestlog(
245 vfs, repo, manifest.manifestrevlog(repo.svfs), repo.narrowmatch()
245 vfs, repo, manifest.manifestrevlog(repo.svfs), repo.narrowmatch()
246 )
246 )
247 checkzobject(ml)
247 checkzobject(ml)
248 checkzobject(repo.manifestlog)
248 checkzobject(repo.manifestlog)
249
249
250 # Conforms to imanifestrevision.
250 # Conforms to imanifestrevision.
251 mctx = ml[repo[0].manifestnode()]
251 mctx = ml[repo[0].manifestnode()]
252 checkzobject(mctx)
252 checkzobject(mctx)
253
253
254 # Conforms to imanifestrevisionwritable.
254 # Conforms to imanifestrevisionwritable.
255 checkzobject(mctx.new())
256 checkzobject(mctx.copy())
255 checkzobject(mctx.copy())
257
256
258 # Conforms to imanifestdict.
257 # Conforms to imanifestdict.
259 checkzobject(mctx.read())
258 checkzobject(mctx.read())
260
259
261 mrl = manifest.manifestrevlog(vfs)
260 mrl = manifest.manifestrevlog(vfs)
262 checkzobject(mrl)
261 checkzobject(mrl)
263
262
264 ziverify.verifyClass(repository.irevisiondelta, revlog.revlogrevisiondelta)
263 ziverify.verifyClass(repository.irevisiondelta, revlog.revlogrevisiondelta)
265
264
266 rd = revlog.revlogrevisiondelta(
265 rd = revlog.revlogrevisiondelta(
267 node=b'',
266 node=b'',
268 p1node=b'',
267 p1node=b'',
269 p2node=b'',
268 p2node=b'',
270 basenode=b'',
269 basenode=b'',
271 linknode=b'',
270 linknode=b'',
272 flags=b'',
271 flags=b'',
273 baserevisionsize=None,
272 baserevisionsize=None,
274 revision=b'',
273 revision=b'',
275 delta=None,
274 delta=None,
276 )
275 )
277 checkzobject(rd)
276 checkzobject(rd)
278
277
279 ziverify.verifyClass(repository.iverifyproblem, revlog.revlogproblem)
278 ziverify.verifyClass(repository.iverifyproblem, revlog.revlogproblem)
280 checkzobject(revlog.revlogproblem())
279 checkzobject(revlog.revlogproblem())
281
280
282
281
283 main()
282 main()
General Comments 0
You need to be logged in to leave comments. Login now