##// END OF EJS Templates
manifest: move matches method to be outside the interface...
Augie Fackler -
r44826:0bf3b5e8 default
parent child Browse files
Show More
@@ -1,1978 +1,1969 b''
1 # repository.py - Interfaces and base classes for repositories and peers.
1 # repository.py - Interfaces and base classes for repositories and peers.
2 #
2 #
3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from ..i18n import _
10 from ..i18n import _
11 from .. import error
11 from .. import error
12 from . import util as interfaceutil
12 from . import util as interfaceutil
13
13
14 # When narrowing is finalized and no longer subject to format changes,
14 # When narrowing is finalized and no longer subject to format changes,
15 # we should move this to just "narrow" or similar.
15 # we should move this to just "narrow" or similar.
16 NARROW_REQUIREMENT = b'narrowhg-experimental'
16 NARROW_REQUIREMENT = b'narrowhg-experimental'
17
17
18 # Local repository feature string.
18 # Local repository feature string.
19
19
20 # Revlogs are being used for file storage.
20 # Revlogs are being used for file storage.
21 REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
21 REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
22 # The storage part of the repository is shared from an external source.
22 # The storage part of the repository is shared from an external source.
23 REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
23 REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
24 # LFS supported for backing file storage.
24 # LFS supported for backing file storage.
25 REPO_FEATURE_LFS = b'lfs'
25 REPO_FEATURE_LFS = b'lfs'
26 # Repository supports being stream cloned.
26 # Repository supports being stream cloned.
27 REPO_FEATURE_STREAM_CLONE = b'streamclone'
27 REPO_FEATURE_STREAM_CLONE = b'streamclone'
28 # Files storage may lack data for all ancestors.
28 # Files storage may lack data for all ancestors.
29 REPO_FEATURE_SHALLOW_FILE_STORAGE = b'shallowfilestorage'
29 REPO_FEATURE_SHALLOW_FILE_STORAGE = b'shallowfilestorage'
30
30
31 REVISION_FLAG_CENSORED = 1 << 15
31 REVISION_FLAG_CENSORED = 1 << 15
32 REVISION_FLAG_ELLIPSIS = 1 << 14
32 REVISION_FLAG_ELLIPSIS = 1 << 14
33 REVISION_FLAG_EXTSTORED = 1 << 13
33 REVISION_FLAG_EXTSTORED = 1 << 13
34 REVISION_FLAG_SIDEDATA = 1 << 12
34 REVISION_FLAG_SIDEDATA = 1 << 12
35
35
36 REVISION_FLAGS_KNOWN = (
36 REVISION_FLAGS_KNOWN = (
37 REVISION_FLAG_CENSORED
37 REVISION_FLAG_CENSORED
38 | REVISION_FLAG_ELLIPSIS
38 | REVISION_FLAG_ELLIPSIS
39 | REVISION_FLAG_EXTSTORED
39 | REVISION_FLAG_EXTSTORED
40 | REVISION_FLAG_SIDEDATA
40 | REVISION_FLAG_SIDEDATA
41 )
41 )
42
42
43 CG_DELTAMODE_STD = b'default'
43 CG_DELTAMODE_STD = b'default'
44 CG_DELTAMODE_PREV = b'previous'
44 CG_DELTAMODE_PREV = b'previous'
45 CG_DELTAMODE_FULL = b'fulltext'
45 CG_DELTAMODE_FULL = b'fulltext'
46 CG_DELTAMODE_P1 = b'p1'
46 CG_DELTAMODE_P1 = b'p1'
47
47
48
48
49 class ipeerconnection(interfaceutil.Interface):
49 class ipeerconnection(interfaceutil.Interface):
50 """Represents a "connection" to a repository.
50 """Represents a "connection" to a repository.
51
51
52 This is the base interface for representing a connection to a repository.
52 This is the base interface for representing a connection to a repository.
53 It holds basic properties and methods applicable to all peer types.
53 It holds basic properties and methods applicable to all peer types.
54
54
55 This is not a complete interface definition and should not be used
55 This is not a complete interface definition and should not be used
56 outside of this module.
56 outside of this module.
57 """
57 """
58
58
59 ui = interfaceutil.Attribute("""ui.ui instance""")
59 ui = interfaceutil.Attribute("""ui.ui instance""")
60
60
61 def url():
61 def url():
62 """Returns a URL string representing this peer.
62 """Returns a URL string representing this peer.
63
63
64 Currently, implementations expose the raw URL used to construct the
64 Currently, implementations expose the raw URL used to construct the
65 instance. It may contain credentials as part of the URL. The
65 instance. It may contain credentials as part of the URL. The
66 expectations of the value aren't well-defined and this could lead to
66 expectations of the value aren't well-defined and this could lead to
67 data leakage.
67 data leakage.
68
68
69 TODO audit/clean consumers and more clearly define the contents of this
69 TODO audit/clean consumers and more clearly define the contents of this
70 value.
70 value.
71 """
71 """
72
72
73 def local():
73 def local():
74 """Returns a local repository instance.
74 """Returns a local repository instance.
75
75
76 If the peer represents a local repository, returns an object that
76 If the peer represents a local repository, returns an object that
77 can be used to interface with it. Otherwise returns ``None``.
77 can be used to interface with it. Otherwise returns ``None``.
78 """
78 """
79
79
80 def peer():
80 def peer():
81 """Returns an object conforming to this interface.
81 """Returns an object conforming to this interface.
82
82
83 Most implementations will ``return self``.
83 Most implementations will ``return self``.
84 """
84 """
85
85
86 def canpush():
86 def canpush():
87 """Returns a boolean indicating if this peer can be pushed to."""
87 """Returns a boolean indicating if this peer can be pushed to."""
88
88
89 def close():
89 def close():
90 """Close the connection to this peer.
90 """Close the connection to this peer.
91
91
92 This is called when the peer will no longer be used. Resources
92 This is called when the peer will no longer be used. Resources
93 associated with the peer should be cleaned up.
93 associated with the peer should be cleaned up.
94 """
94 """
95
95
96
96
97 class ipeercapabilities(interfaceutil.Interface):
97 class ipeercapabilities(interfaceutil.Interface):
98 """Peer sub-interface related to capabilities."""
98 """Peer sub-interface related to capabilities."""
99
99
100 def capable(name):
100 def capable(name):
101 """Determine support for a named capability.
101 """Determine support for a named capability.
102
102
103 Returns ``False`` if capability not supported.
103 Returns ``False`` if capability not supported.
104
104
105 Returns ``True`` if boolean capability is supported. Returns a string
105 Returns ``True`` if boolean capability is supported. Returns a string
106 if capability support is non-boolean.
106 if capability support is non-boolean.
107
107
108 Capability strings may or may not map to wire protocol capabilities.
108 Capability strings may or may not map to wire protocol capabilities.
109 """
109 """
110
110
111 def requirecap(name, purpose):
111 def requirecap(name, purpose):
112 """Require a capability to be present.
112 """Require a capability to be present.
113
113
114 Raises a ``CapabilityError`` if the capability isn't present.
114 Raises a ``CapabilityError`` if the capability isn't present.
115 """
115 """
116
116
117
117
118 class ipeercommands(interfaceutil.Interface):
118 class ipeercommands(interfaceutil.Interface):
119 """Client-side interface for communicating over the wire protocol.
119 """Client-side interface for communicating over the wire protocol.
120
120
121 This interface is used as a gateway to the Mercurial wire protocol.
121 This interface is used as a gateway to the Mercurial wire protocol.
122 Methods commonly call wire protocol commands of the same name.
122 Methods commonly call wire protocol commands of the same name.
123 """
123 """
124
124
125 def branchmap():
125 def branchmap():
126 """Obtain heads in named branches.
126 """Obtain heads in named branches.
127
127
128 Returns a dict mapping branch name to an iterable of nodes that are
128 Returns a dict mapping branch name to an iterable of nodes that are
129 heads on that branch.
129 heads on that branch.
130 """
130 """
131
131
132 def capabilities():
132 def capabilities():
133 """Obtain capabilities of the peer.
133 """Obtain capabilities of the peer.
134
134
135 Returns a set of string capabilities.
135 Returns a set of string capabilities.
136 """
136 """
137
137
138 def clonebundles():
138 def clonebundles():
139 """Obtains the clone bundles manifest for the repo.
139 """Obtains the clone bundles manifest for the repo.
140
140
141 Returns the manifest as unparsed bytes.
141 Returns the manifest as unparsed bytes.
142 """
142 """
143
143
144 def debugwireargs(one, two, three=None, four=None, five=None):
144 def debugwireargs(one, two, three=None, four=None, five=None):
145 """Used to facilitate debugging of arguments passed over the wire."""
145 """Used to facilitate debugging of arguments passed over the wire."""
146
146
147 def getbundle(source, **kwargs):
147 def getbundle(source, **kwargs):
148 """Obtain remote repository data as a bundle.
148 """Obtain remote repository data as a bundle.
149
149
150 This command is how the bulk of repository data is transferred from
150 This command is how the bulk of repository data is transferred from
151 the peer to the local repository.
151 the peer to the local repository.
152
152
153 Returns a generator of bundle data.
153 Returns a generator of bundle data.
154 """
154 """
155
155
156 def heads():
156 def heads():
157 """Determine all known head revisions in the peer.
157 """Determine all known head revisions in the peer.
158
158
159 Returns an iterable of binary nodes.
159 Returns an iterable of binary nodes.
160 """
160 """
161
161
162 def known(nodes):
162 def known(nodes):
163 """Determine whether multiple nodes are known.
163 """Determine whether multiple nodes are known.
164
164
165 Accepts an iterable of nodes whose presence to check for.
165 Accepts an iterable of nodes whose presence to check for.
166
166
167 Returns an iterable of booleans indicating if the corresponding node
167 Returns an iterable of booleans indicating if the corresponding node
168 at that index is known to the peer.
168 at that index is known to the peer.
169 """
169 """
170
170
171 def listkeys(namespace):
171 def listkeys(namespace):
172 """Obtain all keys in a pushkey namespace.
172 """Obtain all keys in a pushkey namespace.
173
173
174 Returns an iterable of key names.
174 Returns an iterable of key names.
175 """
175 """
176
176
177 def lookup(key):
177 def lookup(key):
178 """Resolve a value to a known revision.
178 """Resolve a value to a known revision.
179
179
180 Returns a binary node of the resolved revision on success.
180 Returns a binary node of the resolved revision on success.
181 """
181 """
182
182
183 def pushkey(namespace, key, old, new):
183 def pushkey(namespace, key, old, new):
184 """Set a value using the ``pushkey`` protocol.
184 """Set a value using the ``pushkey`` protocol.
185
185
186 Arguments correspond to the pushkey namespace and key to operate on and
186 Arguments correspond to the pushkey namespace and key to operate on and
187 the old and new values for that key.
187 the old and new values for that key.
188
188
189 Returns a string with the peer result. The value inside varies by the
189 Returns a string with the peer result. The value inside varies by the
190 namespace.
190 namespace.
191 """
191 """
192
192
193 def stream_out():
193 def stream_out():
194 """Obtain streaming clone data.
194 """Obtain streaming clone data.
195
195
196 Successful result should be a generator of data chunks.
196 Successful result should be a generator of data chunks.
197 """
197 """
198
198
199 def unbundle(bundle, heads, url):
199 def unbundle(bundle, heads, url):
200 """Transfer repository data to the peer.
200 """Transfer repository data to the peer.
201
201
202 This is how the bulk of data during a push is transferred.
202 This is how the bulk of data during a push is transferred.
203
203
204 Returns the integer number of heads added to the peer.
204 Returns the integer number of heads added to the peer.
205 """
205 """
206
206
207
207
208 class ipeerlegacycommands(interfaceutil.Interface):
208 class ipeerlegacycommands(interfaceutil.Interface):
209 """Interface for implementing support for legacy wire protocol commands.
209 """Interface for implementing support for legacy wire protocol commands.
210
210
211 Wire protocol commands transition to legacy status when they are no longer
211 Wire protocol commands transition to legacy status when they are no longer
212 used by modern clients. To facilitate identifying which commands are
212 used by modern clients. To facilitate identifying which commands are
213 legacy, the interfaces are split.
213 legacy, the interfaces are split.
214 """
214 """
215
215
216 def between(pairs):
216 def between(pairs):
217 """Obtain nodes between pairs of nodes.
217 """Obtain nodes between pairs of nodes.
218
218
219 ``pairs`` is an iterable of node pairs.
219 ``pairs`` is an iterable of node pairs.
220
220
221 Returns an iterable of iterables of nodes corresponding to each
221 Returns an iterable of iterables of nodes corresponding to each
222 requested pair.
222 requested pair.
223 """
223 """
224
224
225 def branches(nodes):
225 def branches(nodes):
226 """Obtain ancestor changesets of specific nodes back to a branch point.
226 """Obtain ancestor changesets of specific nodes back to a branch point.
227
227
228 For each requested node, the peer finds the first ancestor node that is
228 For each requested node, the peer finds the first ancestor node that is
229 a DAG root or is a merge.
229 a DAG root or is a merge.
230
230
231 Returns an iterable of iterables with the resolved values for each node.
231 Returns an iterable of iterables with the resolved values for each node.
232 """
232 """
233
233
234 def changegroup(nodes, source):
234 def changegroup(nodes, source):
235 """Obtain a changegroup with data for descendants of specified nodes."""
235 """Obtain a changegroup with data for descendants of specified nodes."""
236
236
237 def changegroupsubset(bases, heads, source):
237 def changegroupsubset(bases, heads, source):
238 pass
238 pass
239
239
240
240
241 class ipeercommandexecutor(interfaceutil.Interface):
241 class ipeercommandexecutor(interfaceutil.Interface):
242 """Represents a mechanism to execute remote commands.
242 """Represents a mechanism to execute remote commands.
243
243
244 This is the primary interface for requesting that wire protocol commands
244 This is the primary interface for requesting that wire protocol commands
245 be executed. Instances of this interface are active in a context manager
245 be executed. Instances of this interface are active in a context manager
246 and have a well-defined lifetime. When the context manager exits, all
246 and have a well-defined lifetime. When the context manager exits, all
247 outstanding requests are waited on.
247 outstanding requests are waited on.
248 """
248 """
249
249
250 def callcommand(name, args):
250 def callcommand(name, args):
251 """Request that a named command be executed.
251 """Request that a named command be executed.
252
252
253 Receives the command name and a dictionary of command arguments.
253 Receives the command name and a dictionary of command arguments.
254
254
255 Returns a ``concurrent.futures.Future`` that will resolve to the
255 Returns a ``concurrent.futures.Future`` that will resolve to the
256 result of that command request. That exact value is left up to
256 result of that command request. That exact value is left up to
257 the implementation and possibly varies by command.
257 the implementation and possibly varies by command.
258
258
259 Not all commands can coexist with other commands in an executor
259 Not all commands can coexist with other commands in an executor
260 instance: it depends on the underlying wire protocol transport being
260 instance: it depends on the underlying wire protocol transport being
261 used and the command itself.
261 used and the command itself.
262
262
263 Implementations MAY call ``sendcommands()`` automatically if the
263 Implementations MAY call ``sendcommands()`` automatically if the
264 requested command cannot coexist with other commands in this executor.
264 requested command cannot coexist with other commands in this executor.
265
265
266 Implementations MAY call ``sendcommands()`` automatically when the
266 Implementations MAY call ``sendcommands()`` automatically when the
267 future's ``result()`` is called. So, consumers using multiple
267 future's ``result()`` is called. So, consumers using multiple
268 commands with an executor MUST ensure that ``result()`` is not called
268 commands with an executor MUST ensure that ``result()`` is not called
269 until all command requests have been issued.
269 until all command requests have been issued.
270 """
270 """
271
271
272 def sendcommands():
272 def sendcommands():
273 """Trigger submission of queued command requests.
273 """Trigger submission of queued command requests.
274
274
275 Not all transports submit commands as soon as they are requested to
275 Not all transports submit commands as soon as they are requested to
276 run. When called, this method forces queued command requests to be
276 run. When called, this method forces queued command requests to be
277 issued. It will no-op if all commands have already been sent.
277 issued. It will no-op if all commands have already been sent.
278
278
279 When called, no more new commands may be issued with this executor.
279 When called, no more new commands may be issued with this executor.
280 """
280 """
281
281
282 def close():
282 def close():
283 """Signal that this command request is finished.
283 """Signal that this command request is finished.
284
284
285 When called, no more new commands may be issued. All outstanding
285 When called, no more new commands may be issued. All outstanding
286 commands that have previously been issued are waited on before
286 commands that have previously been issued are waited on before
287 returning. This not only includes waiting for the futures to resolve,
287 returning. This not only includes waiting for the futures to resolve,
288 but also waiting for all response data to arrive. In other words,
288 but also waiting for all response data to arrive. In other words,
289 calling this waits for all on-wire state for issued command requests
289 calling this waits for all on-wire state for issued command requests
290 to finish.
290 to finish.
291
291
292 When used as a context manager, this method is called when exiting the
292 When used as a context manager, this method is called when exiting the
293 context manager.
293 context manager.
294
294
295 This method may call ``sendcommands()`` if there are buffered commands.
295 This method may call ``sendcommands()`` if there are buffered commands.
296 """
296 """
297
297
298
298
299 class ipeerrequests(interfaceutil.Interface):
299 class ipeerrequests(interfaceutil.Interface):
300 """Interface for executing commands on a peer."""
300 """Interface for executing commands on a peer."""
301
301
302 limitedarguments = interfaceutil.Attribute(
302 limitedarguments = interfaceutil.Attribute(
303 """True if the peer cannot receive large argument value for commands."""
303 """True if the peer cannot receive large argument value for commands."""
304 )
304 )
305
305
306 def commandexecutor():
306 def commandexecutor():
307 """A context manager that resolves to an ipeercommandexecutor.
307 """A context manager that resolves to an ipeercommandexecutor.
308
308
309 The object this resolves to can be used to issue command requests
309 The object this resolves to can be used to issue command requests
310 to the peer.
310 to the peer.
311
311
312 Callers should call its ``callcommand`` method to issue command
312 Callers should call its ``callcommand`` method to issue command
313 requests.
313 requests.
314
314
315 A new executor should be obtained for each distinct set of commands
315 A new executor should be obtained for each distinct set of commands
316 (possibly just a single command) that the consumer wants to execute
316 (possibly just a single command) that the consumer wants to execute
317 as part of a single operation or round trip. This is because some
317 as part of a single operation or round trip. This is because some
318 peers are half-duplex and/or don't support persistent connections.
318 peers are half-duplex and/or don't support persistent connections.
319 e.g. in the case of HTTP peers, commands sent to an executor represent
319 e.g. in the case of HTTP peers, commands sent to an executor represent
320 a single HTTP request. While some peers may support multiple command
320 a single HTTP request. While some peers may support multiple command
321 sends over the wire per executor, consumers need to code to the least
321 sends over the wire per executor, consumers need to code to the least
322 capable peer. So it should be assumed that command executors buffer
322 capable peer. So it should be assumed that command executors buffer
323 called commands until they are told to send them and that each
323 called commands until they are told to send them and that each
324 command executor could result in a new connection or wire-level request
324 command executor could result in a new connection or wire-level request
325 being issued.
325 being issued.
326 """
326 """
327
327
328
328
329 class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
329 class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
330 """Unified interface for peer repositories.
330 """Unified interface for peer repositories.
331
331
332 All peer instances must conform to this interface.
332 All peer instances must conform to this interface.
333 """
333 """
334
334
335
335
336 class ipeerv2(ipeerconnection, ipeercapabilities, ipeerrequests):
336 class ipeerv2(ipeerconnection, ipeercapabilities, ipeerrequests):
337 """Unified peer interface for wire protocol version 2 peers."""
337 """Unified peer interface for wire protocol version 2 peers."""
338
338
339 apidescriptor = interfaceutil.Attribute(
339 apidescriptor = interfaceutil.Attribute(
340 """Data structure holding description of server API."""
340 """Data structure holding description of server API."""
341 )
341 )
342
342
343
343
344 @interfaceutil.implementer(ipeerbase)
344 @interfaceutil.implementer(ipeerbase)
345 class peer(object):
345 class peer(object):
346 """Base class for peer repositories."""
346 """Base class for peer repositories."""
347
347
348 limitedarguments = False
348 limitedarguments = False
349
349
350 def capable(self, name):
350 def capable(self, name):
351 caps = self.capabilities()
351 caps = self.capabilities()
352 if name in caps:
352 if name in caps:
353 return True
353 return True
354
354
355 name = b'%s=' % name
355 name = b'%s=' % name
356 for cap in caps:
356 for cap in caps:
357 if cap.startswith(name):
357 if cap.startswith(name):
358 return cap[len(name) :]
358 return cap[len(name) :]
359
359
360 return False
360 return False
361
361
362 def requirecap(self, name, purpose):
362 def requirecap(self, name, purpose):
363 if self.capable(name):
363 if self.capable(name):
364 return
364 return
365
365
366 raise error.CapabilityError(
366 raise error.CapabilityError(
367 _(
367 _(
368 b'cannot %s; remote repository does not support the '
368 b'cannot %s; remote repository does not support the '
369 b'\'%s\' capability'
369 b'\'%s\' capability'
370 )
370 )
371 % (purpose, name)
371 % (purpose, name)
372 )
372 )
373
373
374
374
375 class iverifyproblem(interfaceutil.Interface):
375 class iverifyproblem(interfaceutil.Interface):
376 """Represents a problem with the integrity of the repository.
376 """Represents a problem with the integrity of the repository.
377
377
378 Instances of this interface are emitted to describe an integrity issue
378 Instances of this interface are emitted to describe an integrity issue
379 with a repository (e.g. corrupt storage, missing data, etc).
379 with a repository (e.g. corrupt storage, missing data, etc).
380
380
381 Instances are essentially messages associated with severity.
381 Instances are essentially messages associated with severity.
382 """
382 """
383
383
384 warning = interfaceutil.Attribute(
384 warning = interfaceutil.Attribute(
385 """Message indicating a non-fatal problem."""
385 """Message indicating a non-fatal problem."""
386 )
386 )
387
387
388 error = interfaceutil.Attribute("""Message indicating a fatal problem.""")
388 error = interfaceutil.Attribute("""Message indicating a fatal problem.""")
389
389
390 node = interfaceutil.Attribute(
390 node = interfaceutil.Attribute(
391 """Revision encountering the problem.
391 """Revision encountering the problem.
392
392
393 ``None`` means the problem doesn't apply to a single revision.
393 ``None`` means the problem doesn't apply to a single revision.
394 """
394 """
395 )
395 )
396
396
397
397
398 class irevisiondelta(interfaceutil.Interface):
398 class irevisiondelta(interfaceutil.Interface):
399 """Represents a delta between one revision and another.
399 """Represents a delta between one revision and another.
400
400
401 Instances convey enough information to allow a revision to be exchanged
401 Instances convey enough information to allow a revision to be exchanged
402 with another repository.
402 with another repository.
403
403
404 Instances represent the fulltext revision data or a delta against
404 Instances represent the fulltext revision data or a delta against
405 another revision. Therefore the ``revision`` and ``delta`` attributes
405 another revision. Therefore the ``revision`` and ``delta`` attributes
406 are mutually exclusive.
406 are mutually exclusive.
407
407
408 Typically used for changegroup generation.
408 Typically used for changegroup generation.
409 """
409 """
410
410
411 node = interfaceutil.Attribute("""20 byte node of this revision.""")
411 node = interfaceutil.Attribute("""20 byte node of this revision.""")
412
412
413 p1node = interfaceutil.Attribute(
413 p1node = interfaceutil.Attribute(
414 """20 byte node of 1st parent of this revision."""
414 """20 byte node of 1st parent of this revision."""
415 )
415 )
416
416
417 p2node = interfaceutil.Attribute(
417 p2node = interfaceutil.Attribute(
418 """20 byte node of 2nd parent of this revision."""
418 """20 byte node of 2nd parent of this revision."""
419 )
419 )
420
420
421 linknode = interfaceutil.Attribute(
421 linknode = interfaceutil.Attribute(
422 """20 byte node of the changelog revision this node is linked to."""
422 """20 byte node of the changelog revision this node is linked to."""
423 )
423 )
424
424
425 flags = interfaceutil.Attribute(
425 flags = interfaceutil.Attribute(
426 """2 bytes of integer flags that apply to this revision.
426 """2 bytes of integer flags that apply to this revision.
427
427
428 This is a bitwise composition of the ``REVISION_FLAG_*`` constants.
428 This is a bitwise composition of the ``REVISION_FLAG_*`` constants.
429 """
429 """
430 )
430 )
431
431
432 basenode = interfaceutil.Attribute(
432 basenode = interfaceutil.Attribute(
433 """20 byte node of the revision this data is a delta against.
433 """20 byte node of the revision this data is a delta against.
434
434
435 ``nullid`` indicates that the revision is a full revision and not
435 ``nullid`` indicates that the revision is a full revision and not
436 a delta.
436 a delta.
437 """
437 """
438 )
438 )
439
439
440 baserevisionsize = interfaceutil.Attribute(
440 baserevisionsize = interfaceutil.Attribute(
441 """Size of base revision this delta is against.
441 """Size of base revision this delta is against.
442
442
443 May be ``None`` if ``basenode`` is ``nullid``.
443 May be ``None`` if ``basenode`` is ``nullid``.
444 """
444 """
445 )
445 )
446
446
447 revision = interfaceutil.Attribute(
447 revision = interfaceutil.Attribute(
448 """Raw fulltext of revision data for this node."""
448 """Raw fulltext of revision data for this node."""
449 )
449 )
450
450
451 delta = interfaceutil.Attribute(
451 delta = interfaceutil.Attribute(
452 """Delta between ``basenode`` and ``node``.
452 """Delta between ``basenode`` and ``node``.
453
453
454 Stored in the bdiff delta format.
454 Stored in the bdiff delta format.
455 """
455 """
456 )
456 )
457
457
458
458
459 class ifilerevisionssequence(interfaceutil.Interface):
459 class ifilerevisionssequence(interfaceutil.Interface):
460 """Contains index data for all revisions of a file.
460 """Contains index data for all revisions of a file.
461
461
462 Types implementing this behave like lists of tuples. The index
462 Types implementing this behave like lists of tuples. The index
463 in the list corresponds to the revision number. The values contain
463 in the list corresponds to the revision number. The values contain
464 index metadata.
464 index metadata.
465
465
466 The *null* revision (revision number -1) is always the last item
466 The *null* revision (revision number -1) is always the last item
467 in the index.
467 in the index.
468 """
468 """
469
469
470 def __len__():
470 def __len__():
471 """The total number of revisions."""
471 """The total number of revisions."""
472
472
473 def __getitem__(rev):
473 def __getitem__(rev):
474 """Returns the object having a specific revision number.
474 """Returns the object having a specific revision number.
475
475
476 Returns an 8-tuple with the following fields:
476 Returns an 8-tuple with the following fields:
477
477
478 offset+flags
478 offset+flags
479 Contains the offset and flags for the revision. 64-bit unsigned
479 Contains the offset and flags for the revision. 64-bit unsigned
480 integer where first 6 bytes are the offset and the next 2 bytes
480 integer where first 6 bytes are the offset and the next 2 bytes
481 are flags. The offset can be 0 if it is not used by the store.
481 are flags. The offset can be 0 if it is not used by the store.
482 compressed size
482 compressed size
483 Size of the revision data in the store. It can be 0 if it isn't
483 Size of the revision data in the store. It can be 0 if it isn't
484 needed by the store.
484 needed by the store.
485 uncompressed size
485 uncompressed size
486 Fulltext size. It can be 0 if it isn't needed by the store.
486 Fulltext size. It can be 0 if it isn't needed by the store.
487 base revision
487 base revision
488 Revision number of revision the delta for storage is encoded
488 Revision number of revision the delta for storage is encoded
489 against. -1 indicates not encoded against a base revision.
489 against. -1 indicates not encoded against a base revision.
490 link revision
490 link revision
491 Revision number of changelog revision this entry is related to.
491 Revision number of changelog revision this entry is related to.
492 p1 revision
492 p1 revision
493 Revision number of 1st parent. -1 if no 1st parent.
493 Revision number of 1st parent. -1 if no 1st parent.
494 p2 revision
494 p2 revision
495 Revision number of 2nd parent. -1 if no 2nd parent.
495 Revision number of 2nd parent. -1 if no 2nd parent.
496 node
496 node
497 Binary node value for this revision number.
497 Binary node value for this revision number.
498
498
499 Negative values should index off the end of the sequence. ``-1``
499 Negative values should index off the end of the sequence. ``-1``
500 should return the null revision. ``-2`` should return the most
500 should return the null revision. ``-2`` should return the most
501 recent revision.
501 recent revision.
502 """
502 """
503
503
504 def __contains__(rev):
504 def __contains__(rev):
505 """Whether a revision number exists."""
505 """Whether a revision number exists."""
506
506
507 def insert(self, i, entry):
507 def insert(self, i, entry):
508 """Add an item to the index at specific revision."""
508 """Add an item to the index at specific revision."""
509
509
510
510
class ifileindex(interfaceutil.Interface):
    """Storage interface for index data of a single file.

    File storage data is divided into index metadata and data storage.
    This interface defines the index portion of the interface.

    The index logically consists of:

    * A mapping between revision numbers and nodes.
    * DAG data (storing and querying the relationship between nodes).
    * Metadata to facilitate storage.
    """

    def __len__():
        """Obtain the number of revisions stored for this file."""

    def __iter__():
        """Iterate over revision numbers for this file."""

    def hasnode(node):
        """Returns a bool indicating if a node is known to this store.

        Implementations must only return True for full, binary node values:
        hex nodes, revision numbers, and partial node matches must be
        rejected.

        The null node is never present.
        """

    def revs(start=0, stop=None):
        """Iterate over revision numbers for this file, with control."""

    def parents(node):
        """Returns a 2-tuple of parent nodes for a revision.

        Values will be ``nullid`` if the parent is empty.
        """

    def parentrevs(rev):
        """Like parents() but operates on revision numbers."""

    def rev(node):
        """Obtain the revision number given a node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def node(rev):
        """Obtain the node value given a revision number.

        Raises ``IndexError`` if the node is not known.
        """

    def lookup(node):
        """Attempt to resolve a value to a node.

        Value can be a binary node, hex node, revision number, or a string
        that can be converted to an integer.

        Raises ``error.LookupError`` if a node could not be resolved.
        """

    def linkrev(rev):
        """Obtain the changeset revision number a revision is linked to."""

    def iscensored(rev):
        """Return whether a revision's content has been censored."""

    def commonancestorsheads(node1, node2):
        """Obtain an iterable of nodes containing heads of common ancestors.

        See ``ancestor.commonancestorsheads()``.
        """

    def descendants(revs):
        """Obtain descendant revision numbers for a set of revision numbers.

        If ``nullrev`` is in the set, this is equivalent to ``revs()``.
        """

    def heads(start=None, stop=None):
        """Obtain a list of nodes that are DAG heads, with control.

        The set of revisions examined can be limited by specifying
        ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
        iterable of nodes. DAG traversal starts at earlier revision
        ``start`` and iterates forward until any node in ``stop`` is
        encountered.
        """

    def children(node):
        """Obtain nodes that are children of a node.

        Returns a list of nodes.
        """
606
606
607
607
class ifiledata(interfaceutil.Interface):
    """Storage interface for data storage of a specific file.

    This complements ``ifileindex`` and provides an interface for accessing
    data for a tracked file.
    """

    def size(rev):
        """Obtain the fulltext size of file data.

        Any metadata is excluded from size measurements.
        """

    def revision(node, raw=False):
        """Obtain fulltext data for a node.

        By default, any storage transformations are applied before the data
        is returned. If ``raw`` is True, non-raw storage transformations
        are not applied.

        The fulltext data may contain a header containing metadata. Most
        consumers should use ``read()`` to obtain the actual file data.
        """

    def rawdata(node):
        """Obtain raw data for a node."""

    def read(node):
        """Resolve file fulltext data.

        This is similar to ``revision()`` except any metadata in the data
        headers is stripped.
        """

    def renamed(node):
        """Obtain copy metadata for a node.

        Returns ``False`` if no copy metadata is stored or a 2-tuple of
        (path, node) from which this revision was copied.
        """

    def cmp(node, fulltext):
        """Compare fulltext to another revision.

        Returns True if the fulltext is different from what is stored.

        This takes copy metadata into account.

        TODO better document the copy metadata and censoring logic.
        """

    def emitrevisions(
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
        deltamode=CG_DELTAMODE_STD,
    ):
        """Produce ``irevisiondelta`` for revisions.

        Given an iterable of nodes, emits objects conforming to the
        ``irevisiondelta`` interface that describe revisions in storage.

        This method is a generator.

        The input nodes may be unordered. Implementations must ensure that a
        node's parents are emitted before the node itself. Transitively, this
        means that a node may only be emitted once all its ancestors in
        ``nodes`` have also been emitted.

        By default, emits "index" data (the ``node``, ``p1node``, and
        ``p2node`` attributes). If ``revisiondata`` is set, revision data
        will also be present on the emitted objects.

        With default argument values, implementations can choose to emit
        either fulltext revision data or a delta. When emitting deltas,
        implementations must consider whether the delta's base revision
        fulltext is available to the receiver.

        The base revision fulltext is guaranteed to be available if any of
        the following are met:

        * Its fulltext revision was emitted by this method call.
        * A delta for that revision was emitted by this method call.
        * ``assumehaveparentrevisions`` is True and the base revision is a
          parent of the node.

        ``nodesorder`` can be used to control the order that revisions are
        emitted. By default, revisions can be reordered as long as they are
        in DAG topological order (see above). If the value is ``nodes``,
        the iteration order from ``nodes`` should be used. If the value is
        ``storage``, then the native order from the backing storage layer
        is used. (Not all storage layers will have strong ordering and behavior
        of this mode is storage-dependent.) ``nodes`` ordering can force
        revisions to be emitted before their ancestors, so consumers should
        use it with care.

        The ``linknode`` attribute on the returned ``irevisiondelta`` may not
        be set and it is the caller's responsibility to resolve it, if needed.

        If ``deltamode`` is CG_DELTAMODE_PREV and revision data is requested,
        all revision data should be emitted as deltas against the revision
        emitted just prior. The initial revision should be a delta against its
        1st parent.
        """
714
714
715
715
class ifilemutation(interfaceutil.Interface):
    """Storage interface for mutation events of a tracked file."""

    def add(filedata, meta, transaction, linkrev, p1, p2):
        """Add a new revision to the store.

        Takes file data, dictionary of metadata, a transaction, linkrev,
        and parent nodes.

        Returns the node that was added.

        May no-op if a revision matching the supplied data is already stored.
        """

    def addrevision(
        revisiondata,
        transaction,
        linkrev,
        p1,
        p2,
        node=None,
        flags=0,
        cachedelta=None,
    ):
        """Add a new revision to the store.

        This is similar to ``add()`` except it operates at a lower level.

        The data passed in already contains a metadata header, if any.

        ``node`` and ``flags`` can be used to define the expected node and
        the flags to use with storage. ``flags`` is a bitwise value composed
        of the various ``REVISION_FLAG_*`` constants.

        ``add()`` is usually called when adding files from e.g. the working
        directory. ``addrevision()`` is often called by ``add()`` and for
        scenarios where revision data has already been computed, such as when
        applying raw data from a peer repo.
        """

    def addgroup(
        deltas,
        linkmapper,
        transaction,
        addrevisioncb=None,
        maybemissingparents=False,
    ):
        """Process a series of deltas for storage.

        ``deltas`` is an iterable of 7-tuples of
        (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
        to add.

        The ``delta`` field contains ``mpatch`` data to apply to a base
        revision, identified by ``deltabase``. The base node can be
        ``nullid``, in which case the header from the delta can be ignored
        and the delta used as the fulltext.

        ``addrevisioncb`` should be called for each node as it is committed.

        ``maybemissingparents`` is a bool indicating whether the incoming
        data may reference parents/ancestor revisions that aren't present.
        This flag is set when receiving data into a "shallow" store that
        doesn't hold all history.

        Returns a list of nodes that were processed. A node will be in the list
        even if it existed in the store previously.
        """

    def censorrevision(tr, node, tombstone=b''):
        """Remove the content of a single revision.

        The specified ``node`` will have its content purged from storage.
        Future attempts to access the revision data for this node will
        result in failure.

        A ``tombstone`` message can optionally be stored. This message may be
        displayed to users when they attempt to access the missing revision
        data.

        Storage backends may have stored deltas against the previous content
        in this revision. As part of censoring a revision, these storage
        backends are expected to rewrite any internally stored deltas such
        that they no longer reference the deleted content.
        """

    def getstrippoint(minlink):
        """Find the minimum revision that must be stripped to strip a linkrev.

        Returns a 2-tuple containing the minimum revision number and a set
        of all revisions numbers that would be broken by this strip.

        TODO this is highly revlog centric and should be abstracted into
        a higher-level deletion API. ``repair.strip()`` relies on this.
        """

    def strip(minlink, transaction):
        """Remove storage of items starting at a linkrev.

        This uses ``getstrippoint()`` to determine the first node to remove.
        Then it effectively truncates storage for all revisions after that.

        TODO this is highly revlog centric and should be abstracted into a
        higher-level deletion API.
        """
821
821
822
822
class ifilestorage(ifileindex, ifiledata, ifilemutation):
    """Complete storage interface for a single tracked file."""

    def files():
        """Obtain paths that are backing storage for this file.

        TODO this is used heavily by verify code and there should probably
        be a better API for that.
        """

    def storageinfo(
        exclusivefiles=False,
        sharedfiles=False,
        revisionscount=False,
        trackedsize=False,
        storedsize=False,
    ):
        """Obtain information about storage for this file's data.

        Returns a dict describing storage for this tracked path. The keys
        in the dict map to arguments of the same. The arguments are bools
        indicating whether to calculate and obtain that data.

        exclusivefiles
           Iterable of (vfs, path) describing files that are exclusively
           used to back storage for this tracked path.

        sharedfiles
           Iterable of (vfs, path) describing files that are used to back
           storage for this tracked path. Those files may also provide storage
           for other stored entities.

        revisionscount
           Number of revisions available for retrieval.

        trackedsize
           Total size in bytes of all tracked revisions. This is a sum of the
           length of the fulltext of all revisions.

        storedsize
           Total size in bytes used to store data for all tracked revisions.
           This is commonly less than ``trackedsize`` due to internal usage
           of deltas rather than fulltext revisions.

        Not all storage backends may support all queries are have a reasonable
        value to use. In that case, the value should be set to ``None`` and
        callers are expected to handle this special value.
        """

    def verifyintegrity(state):
        """Verifies the integrity of file storage.

        ``state`` is a dict holding state of the verifier process. It can be
        used to communicate data between invocations of multiple storage
        primitives.

        If individual revisions cannot have their revision content resolved,
        the method is expected to set the ``skipread`` key to a set of nodes
        that encountered problems. If set, the method can also add the node(s)
        to ``safe_renamed`` in order to indicate nodes that may perform the
        rename checks with currently accessible data.

        The method yields objects conforming to the ``iverifyproblem``
        interface.
        """
888
888
889
889
class idirs(interfaceutil.Interface):
    """Interface representing a collection of directories from paths.

    This interface is essentially a derived data structure representing
    directories from a collection of paths.
    """

    def addpath(path):
        """Add a path to the collection.

        All directories in the path will be added to the collection.
        """

    def delpath(path):
        """Remove a path from the collection.

        If the removal was the last path in a particular directory, the
        directory is removed from the collection.
        """

    def __iter__():
        """Iterate over the directories in this collection of paths."""

    def __contains__(path):
        """Whether a specific directory is in this collection."""
915
915
916
916
917 class imanifestdict(interfaceutil.Interface):
917 class imanifestdict(interfaceutil.Interface):
918 """Interface representing a manifest data structure.
918 """Interface representing a manifest data structure.
919
919
920 A manifest is effectively a dict mapping paths to entries. Each entry
920 A manifest is effectively a dict mapping paths to entries. Each entry
921 consists of a binary node and extra flags affecting that entry.
921 consists of a binary node and extra flags affecting that entry.
922 """
922 """
923
923
924 def __getitem__(path):
924 def __getitem__(path):
925 """Returns the binary node value for a path in the manifest.
925 """Returns the binary node value for a path in the manifest.
926
926
927 Raises ``KeyError`` if the path does not exist in the manifest.
927 Raises ``KeyError`` if the path does not exist in the manifest.
928
928
929 Equivalent to ``self.find(path)[0]``.
929 Equivalent to ``self.find(path)[0]``.
930 """
930 """
931
931
932 def find(path):
932 def find(path):
933 """Returns the entry for a path in the manifest.
933 """Returns the entry for a path in the manifest.
934
934
935 Returns a 2-tuple of (node, flags).
935 Returns a 2-tuple of (node, flags).
936
936
937 Raises ``KeyError`` if the path does not exist in the manifest.
937 Raises ``KeyError`` if the path does not exist in the manifest.
938 """
938 """
939
939
940 def __len__():
940 def __len__():
941 """Return the number of entries in the manifest."""
941 """Return the number of entries in the manifest."""
942
942
943 def __nonzero__():
943 def __nonzero__():
944 """Returns True if the manifest has entries, False otherwise."""
944 """Returns True if the manifest has entries, False otherwise."""
945
945
946 __bool__ = __nonzero__
946 __bool__ = __nonzero__
947
947
948 def __setitem__(path, node):
948 def __setitem__(path, node):
949 """Define the node value for a path in the manifest.
949 """Define the node value for a path in the manifest.
950
950
951 If the path is already in the manifest, its flags will be copied to
951 If the path is already in the manifest, its flags will be copied to
952 the new entry.
952 the new entry.
953 """
953 """
954
954
955 def __contains__(path):
955 def __contains__(path):
956 """Whether a path exists in the manifest."""
956 """Whether a path exists in the manifest."""
957
957
958 def __delitem__(path):
958 def __delitem__(path):
959 """Remove a path from the manifest.
959 """Remove a path from the manifest.
960
960
961 Raises ``KeyError`` if the path is not in the manifest.
961 Raises ``KeyError`` if the path is not in the manifest.
962 """
962 """
963
963
964 def __iter__():
964 def __iter__():
965 """Iterate over paths in the manifest."""
965 """Iterate over paths in the manifest."""
966
966
967 def iterkeys():
967 def iterkeys():
968 """Iterate over paths in the manifest."""
968 """Iterate over paths in the manifest."""
969
969
970 def keys():
970 def keys():
971 """Obtain a list of paths in the manifest."""
971 """Obtain a list of paths in the manifest."""
972
972
973 def filesnotin(other, match=None):
973 def filesnotin(other, match=None):
974 """Obtain the set of paths in this manifest but not in another.
974 """Obtain the set of paths in this manifest but not in another.
975
975
976 ``match`` is an optional matcher function to be applied to both
976 ``match`` is an optional matcher function to be applied to both
977 manifests.
977 manifests.
978
978
979 Returns a set of paths.
979 Returns a set of paths.
980 """
980 """
981
981
982 def dirs():
982 def dirs():
983 """Returns an object implementing the ``idirs`` interface."""
983 """Returns an object implementing the ``idirs`` interface."""
984
984
985 def hasdir(dir):
985 def hasdir(dir):
986 """Returns a bool indicating if a directory is in this manifest."""
986 """Returns a bool indicating if a directory is in this manifest."""
987
987
988 def matches(match):
989 """Generate a new manifest filtered through a matcher.
990
991 Returns an object conforming to the ``imanifestdict`` interface.
992 """
993
994 def walk(match):
988 def walk(match):
995 """Generator of paths in manifest satisfying a matcher.
989 """Generator of paths in manifest satisfying a matcher.
996
990
997 This is equivalent to ``self.matches(match).iterkeys()`` except a new
998 manifest object is not created.
999
1000 If the matcher has explicit files listed and they don't exist in
991 If the matcher has explicit files listed and they don't exist in
1001 the manifest, ``match.bad()`` is called for each missing file.
992 the manifest, ``match.bad()`` is called for each missing file.
1002 """
993 """
1003
994
1004 def diff(other, match=None, clean=False):
995 def diff(other, match=None, clean=False):
1005 """Find differences between this manifest and another.
996 """Find differences between this manifest and another.
1006
997
1007 This manifest is compared to ``other``.
998 This manifest is compared to ``other``.
1008
999
1009 If ``match`` is provided, the two manifests are filtered against this
1000 If ``match`` is provided, the two manifests are filtered against this
1010 matcher and only entries satisfying the matcher are compared.
1001 matcher and only entries satisfying the matcher are compared.
1011
1002
1012 If ``clean`` is True, unchanged files are included in the returned
1003 If ``clean`` is True, unchanged files are included in the returned
1013 object.
1004 object.
1014
1005
1015 Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
1006 Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
1016 the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
1007 the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
1017 represents the node and flags for this manifest and ``(node2, flag2)``
1008 represents the node and flags for this manifest and ``(node2, flag2)``
1018 are the same for the other manifest.
1009 are the same for the other manifest.
1019 """
1010 """
1020
1011
1021 def setflag(path, flag):
1012 def setflag(path, flag):
1022 """Set the flag value for a given path.
1013 """Set the flag value for a given path.
1023
1014
1024 Raises ``KeyError`` if the path is not already in the manifest.
1015 Raises ``KeyError`` if the path is not already in the manifest.
1025 """
1016 """
1026
1017
1027 def get(path, default=None):
1018 def get(path, default=None):
1028 """Obtain the node value for a path or a default value if missing."""
1019 """Obtain the node value for a path or a default value if missing."""
1029
1020
1030 def flags(path):
1021 def flags(path):
1031 """Return the flags value for a path (default: empty bytestring)."""
1022 """Return the flags value for a path (default: empty bytestring)."""
1032
1023
1033 def copy():
1024 def copy():
1034 """Return a copy of this manifest."""
1025 """Return a copy of this manifest."""
1035
1026
1036 def items():
1027 def items():
1037 """Returns an iterable of (path, node) for items in this manifest."""
1028 """Returns an iterable of (path, node) for items in this manifest."""
1038
1029
1039 def iteritems():
1030 def iteritems():
1040 """Identical to items()."""
1031 """Identical to items()."""
1041
1032
1042 def iterentries():
1033 def iterentries():
1043 """Returns an iterable of (path, node, flags) for this manifest.
1034 """Returns an iterable of (path, node, flags) for this manifest.
1044
1035
1045 Similar to ``iteritems()`` except items are a 3-tuple and include
1036 Similar to ``iteritems()`` except items are a 3-tuple and include
1046 flags.
1037 flags.
1047 """
1038 """
1048
1039
1049 def text():
1040 def text():
1050 """Obtain the raw data representation for this manifest.
1041 """Obtain the raw data representation for this manifest.
1051
1042
1052 Result is used to create a manifest revision.
1043 Result is used to create a manifest revision.
1053 """
1044 """
1054
1045
1055 def fastdelta(base, changes):
1046 def fastdelta(base, changes):
1056 """Obtain a delta between this manifest and another given changes.
1047 """Obtain a delta between this manifest and another given changes.
1057
1048
1058 ``base`` in the raw data representation for another manifest.
1049 ``base`` in the raw data representation for another manifest.
1059
1050
1060 ``changes`` is an iterable of ``(path, to_delete)``.
1051 ``changes`` is an iterable of ``(path, to_delete)``.
1061
1052
1062 Returns a 2-tuple containing ``bytearray(self.text())`` and the
1053 Returns a 2-tuple containing ``bytearray(self.text())`` and the
1063 delta between ``base`` and this manifest.
1054 delta between ``base`` and this manifest.
1064 """
1055 """
1065
1056
1066
1057
1067 class imanifestrevisionbase(interfaceutil.Interface):
1058 class imanifestrevisionbase(interfaceutil.Interface):
1068 """Base interface representing a single revision of a manifest.
1059 """Base interface representing a single revision of a manifest.
1069
1060
1070 Should not be used as a primary interface: should always be inherited
1061 Should not be used as a primary interface: should always be inherited
1071 as part of a larger interface.
1062 as part of a larger interface.
1072 """
1063 """
1073
1064
1074 def copy():
1065 def copy():
1075 """Obtain a copy of this manifest instance.
1066 """Obtain a copy of this manifest instance.
1076
1067
1077 Returns an object conforming to the ``imanifestrevisionwritable``
1068 Returns an object conforming to the ``imanifestrevisionwritable``
1078 interface. The instance will be associated with the same
1069 interface. The instance will be associated with the same
1079 ``imanifestlog`` collection as this instance.
1070 ``imanifestlog`` collection as this instance.
1080 """
1071 """
1081
1072
1082 def read():
1073 def read():
1083 """Obtain the parsed manifest data structure.
1074 """Obtain the parsed manifest data structure.
1084
1075
1085 The returned object conforms to the ``imanifestdict`` interface.
1076 The returned object conforms to the ``imanifestdict`` interface.
1086 """
1077 """
1087
1078
1088
1079
1089 class imanifestrevisionstored(imanifestrevisionbase):
1080 class imanifestrevisionstored(imanifestrevisionbase):
1090 """Interface representing a manifest revision committed to storage."""
1081 """Interface representing a manifest revision committed to storage."""
1091
1082
1092 def node():
1083 def node():
1093 """The binary node for this manifest."""
1084 """The binary node for this manifest."""
1094
1085
1095 parents = interfaceutil.Attribute(
1086 parents = interfaceutil.Attribute(
1096 """List of binary nodes that are parents for this manifest revision."""
1087 """List of binary nodes that are parents for this manifest revision."""
1097 )
1088 )
1098
1089
1099 def readdelta(shallow=False):
1090 def readdelta(shallow=False):
1100 """Obtain the manifest data structure representing changes from parent.
1091 """Obtain the manifest data structure representing changes from parent.
1101
1092
1102 This manifest is compared to its 1st parent. A new manifest representing
1093 This manifest is compared to its 1st parent. A new manifest representing
1103 those differences is constructed.
1094 those differences is constructed.
1104
1095
1105 The returned object conforms to the ``imanifestdict`` interface.
1096 The returned object conforms to the ``imanifestdict`` interface.
1106 """
1097 """
1107
1098
1108 def readfast(shallow=False):
1099 def readfast(shallow=False):
1109 """Calls either ``read()`` or ``readdelta()``.
1100 """Calls either ``read()`` or ``readdelta()``.
1110
1101
1111 The faster of the two options is called.
1102 The faster of the two options is called.
1112 """
1103 """
1113
1104
1114 def find(key):
1105 def find(key):
1115 """Calls self.read().find(key)``.
1106 """Calls self.read().find(key)``.
1116
1107
1117 Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
1108 Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
1118 """
1109 """
1119
1110
1120
1111
1121 class imanifestrevisionwritable(imanifestrevisionbase):
1112 class imanifestrevisionwritable(imanifestrevisionbase):
1122 """Interface representing a manifest revision that can be committed."""
1113 """Interface representing a manifest revision that can be committed."""
1123
1114
1124 def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
1115 def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
1125 """Add this revision to storage.
1116 """Add this revision to storage.
1126
1117
1127 Takes a transaction object, the changeset revision number it will
1118 Takes a transaction object, the changeset revision number it will
1128 be associated with, its parent nodes, and lists of added and
1119 be associated with, its parent nodes, and lists of added and
1129 removed paths.
1120 removed paths.
1130
1121
1131 If match is provided, storage can choose not to inspect or write out
1122 If match is provided, storage can choose not to inspect or write out
1132 items that do not match. Storage is still required to be able to provide
1123 items that do not match. Storage is still required to be able to provide
1133 the full manifest in the future for any directories written (these
1124 the full manifest in the future for any directories written (these
1134 manifests should not be "narrowed on disk").
1125 manifests should not be "narrowed on disk").
1135
1126
1136 Returns the binary node of the created revision.
1127 Returns the binary node of the created revision.
1137 """
1128 """
1138
1129
1139
1130
1140 class imanifeststorage(interfaceutil.Interface):
1131 class imanifeststorage(interfaceutil.Interface):
1141 """Storage interface for manifest data."""
1132 """Storage interface for manifest data."""
1142
1133
1143 tree = interfaceutil.Attribute(
1134 tree = interfaceutil.Attribute(
1144 """The path to the directory this manifest tracks.
1135 """The path to the directory this manifest tracks.
1145
1136
1146 The empty bytestring represents the root manifest.
1137 The empty bytestring represents the root manifest.
1147 """
1138 """
1148 )
1139 )
1149
1140
1150 index = interfaceutil.Attribute(
1141 index = interfaceutil.Attribute(
1151 """An ``ifilerevisionssequence`` instance."""
1142 """An ``ifilerevisionssequence`` instance."""
1152 )
1143 )
1153
1144
1154 indexfile = interfaceutil.Attribute(
1145 indexfile = interfaceutil.Attribute(
1155 """Path of revlog index file.
1146 """Path of revlog index file.
1156
1147
1157 TODO this is revlog specific and should not be exposed.
1148 TODO this is revlog specific and should not be exposed.
1158 """
1149 """
1159 )
1150 )
1160
1151
1161 opener = interfaceutil.Attribute(
1152 opener = interfaceutil.Attribute(
1162 """VFS opener to use to access underlying files used for storage.
1153 """VFS opener to use to access underlying files used for storage.
1163
1154
1164 TODO this is revlog specific and should not be exposed.
1155 TODO this is revlog specific and should not be exposed.
1165 """
1156 """
1166 )
1157 )
1167
1158
1168 version = interfaceutil.Attribute(
1159 version = interfaceutil.Attribute(
1169 """Revlog version number.
1160 """Revlog version number.
1170
1161
1171 TODO this is revlog specific and should not be exposed.
1162 TODO this is revlog specific and should not be exposed.
1172 """
1163 """
1173 )
1164 )
1174
1165
1175 _generaldelta = interfaceutil.Attribute(
1166 _generaldelta = interfaceutil.Attribute(
1176 """Whether generaldelta storage is being used.
1167 """Whether generaldelta storage is being used.
1177
1168
1178 TODO this is revlog specific and should not be exposed.
1169 TODO this is revlog specific and should not be exposed.
1179 """
1170 """
1180 )
1171 )
1181
1172
1182 fulltextcache = interfaceutil.Attribute(
1173 fulltextcache = interfaceutil.Attribute(
1183 """Dict with cache of fulltexts.
1174 """Dict with cache of fulltexts.
1184
1175
1185 TODO this doesn't feel appropriate for the storage interface.
1176 TODO this doesn't feel appropriate for the storage interface.
1186 """
1177 """
1187 )
1178 )
1188
1179
1189 def __len__():
1180 def __len__():
1190 """Obtain the number of revisions stored for this manifest."""
1181 """Obtain the number of revisions stored for this manifest."""
1191
1182
1192 def __iter__():
1183 def __iter__():
1193 """Iterate over revision numbers for this manifest."""
1184 """Iterate over revision numbers for this manifest."""
1194
1185
1195 def rev(node):
1186 def rev(node):
1196 """Obtain the revision number given a binary node.
1187 """Obtain the revision number given a binary node.
1197
1188
1198 Raises ``error.LookupError`` if the node is not known.
1189 Raises ``error.LookupError`` if the node is not known.
1199 """
1190 """
1200
1191
1201 def node(rev):
1192 def node(rev):
1202 """Obtain the node value given a revision number.
1193 """Obtain the node value given a revision number.
1203
1194
1204 Raises ``error.LookupError`` if the revision is not known.
1195 Raises ``error.LookupError`` if the revision is not known.
1205 """
1196 """
1206
1197
1207 def lookup(value):
1198 def lookup(value):
1208 """Attempt to resolve a value to a node.
1199 """Attempt to resolve a value to a node.
1209
1200
1210 Value can be a binary node, hex node, revision number, or a bytes
1201 Value can be a binary node, hex node, revision number, or a bytes
1211 that can be converted to an integer.
1202 that can be converted to an integer.
1212
1203
1213 Raises ``error.LookupError`` if a ndoe could not be resolved.
1204 Raises ``error.LookupError`` if a ndoe could not be resolved.
1214 """
1205 """
1215
1206
1216 def parents(node):
1207 def parents(node):
1217 """Returns a 2-tuple of parent nodes for a node.
1208 """Returns a 2-tuple of parent nodes for a node.
1218
1209
1219 Values will be ``nullid`` if the parent is empty.
1210 Values will be ``nullid`` if the parent is empty.
1220 """
1211 """
1221
1212
1222 def parentrevs(rev):
1213 def parentrevs(rev):
1223 """Like parents() but operates on revision numbers."""
1214 """Like parents() but operates on revision numbers."""
1224
1215
1225 def linkrev(rev):
1216 def linkrev(rev):
1226 """Obtain the changeset revision number a revision is linked to."""
1217 """Obtain the changeset revision number a revision is linked to."""
1227
1218
1228 def revision(node, _df=None, raw=False):
1219 def revision(node, _df=None, raw=False):
1229 """Obtain fulltext data for a node."""
1220 """Obtain fulltext data for a node."""
1230
1221
1231 def rawdata(node, _df=None):
1222 def rawdata(node, _df=None):
1232 """Obtain raw data for a node."""
1223 """Obtain raw data for a node."""
1233
1224
1234 def revdiff(rev1, rev2):
1225 def revdiff(rev1, rev2):
1235 """Obtain a delta between two revision numbers.
1226 """Obtain a delta between two revision numbers.
1236
1227
1237 The returned data is the result of ``bdiff.bdiff()`` on the raw
1228 The returned data is the result of ``bdiff.bdiff()`` on the raw
1238 revision data.
1229 revision data.
1239 """
1230 """
1240
1231
1241 def cmp(node, fulltext):
1232 def cmp(node, fulltext):
1242 """Compare fulltext to another revision.
1233 """Compare fulltext to another revision.
1243
1234
1244 Returns True if the fulltext is different from what is stored.
1235 Returns True if the fulltext is different from what is stored.
1245 """
1236 """
1246
1237
1247 def emitrevisions(
1238 def emitrevisions(
1248 nodes,
1239 nodes,
1249 nodesorder=None,
1240 nodesorder=None,
1250 revisiondata=False,
1241 revisiondata=False,
1251 assumehaveparentrevisions=False,
1242 assumehaveparentrevisions=False,
1252 ):
1243 ):
1253 """Produce ``irevisiondelta`` describing revisions.
1244 """Produce ``irevisiondelta`` describing revisions.
1254
1245
1255 See the documentation for ``ifiledata`` for more.
1246 See the documentation for ``ifiledata`` for more.
1256 """
1247 """
1257
1248
1258 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
1249 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
1259 """Process a series of deltas for storage.
1250 """Process a series of deltas for storage.
1260
1251
1261 See the documentation in ``ifilemutation`` for more.
1252 See the documentation in ``ifilemutation`` for more.
1262 """
1253 """
1263
1254
1264 def rawsize(rev):
1255 def rawsize(rev):
1265 """Obtain the size of tracked data.
1256 """Obtain the size of tracked data.
1266
1257
1267 Is equivalent to ``len(m.rawdata(node))``.
1258 Is equivalent to ``len(m.rawdata(node))``.
1268
1259
1269 TODO this method is only used by upgrade code and may be removed.
1260 TODO this method is only used by upgrade code and may be removed.
1270 """
1261 """
1271
1262
1272 def getstrippoint(minlink):
1263 def getstrippoint(minlink):
1273 """Find minimum revision that must be stripped to strip a linkrev.
1264 """Find minimum revision that must be stripped to strip a linkrev.
1274
1265
1275 See the documentation in ``ifilemutation`` for more.
1266 See the documentation in ``ifilemutation`` for more.
1276 """
1267 """
1277
1268
1278 def strip(minlink, transaction):
1269 def strip(minlink, transaction):
1279 """Remove storage of items starting at a linkrev.
1270 """Remove storage of items starting at a linkrev.
1280
1271
1281 See the documentation in ``ifilemutation`` for more.
1272 See the documentation in ``ifilemutation`` for more.
1282 """
1273 """
1283
1274
1284 def checksize():
1275 def checksize():
1285 """Obtain the expected sizes of backing files.
1276 """Obtain the expected sizes of backing files.
1286
1277
1287 TODO this is used by verify and it should not be part of the interface.
1278 TODO this is used by verify and it should not be part of the interface.
1288 """
1279 """
1289
1280
1290 def files():
1281 def files():
1291 """Obtain paths that are backing storage for this manifest.
1282 """Obtain paths that are backing storage for this manifest.
1292
1283
1293 TODO this is used by verify and there should probably be a better API
1284 TODO this is used by verify and there should probably be a better API
1294 for this functionality.
1285 for this functionality.
1295 """
1286 """
1296
1287
1297 def deltaparent(rev):
1288 def deltaparent(rev):
1298 """Obtain the revision that a revision is delta'd against.
1289 """Obtain the revision that a revision is delta'd against.
1299
1290
1300 TODO delta encoding is an implementation detail of storage and should
1291 TODO delta encoding is an implementation detail of storage and should
1301 not be exposed to the storage interface.
1292 not be exposed to the storage interface.
1302 """
1293 """
1303
1294
1304 def clone(tr, dest, **kwargs):
1295 def clone(tr, dest, **kwargs):
1305 """Clone this instance to another."""
1296 """Clone this instance to another."""
1306
1297
1307 def clearcaches(clear_persisted_data=False):
1298 def clearcaches(clear_persisted_data=False):
1308 """Clear any caches associated with this instance."""
1299 """Clear any caches associated with this instance."""
1309
1300
1310 def dirlog(d):
1301 def dirlog(d):
1311 """Obtain a manifest storage instance for a tree."""
1302 """Obtain a manifest storage instance for a tree."""
1312
1303
1313 def add(
1304 def add(
1314 m, transaction, link, p1, p2, added, removed, readtree=None, match=None
1305 m, transaction, link, p1, p2, added, removed, readtree=None, match=None
1315 ):
1306 ):
1316 """Add a revision to storage.
1307 """Add a revision to storage.
1317
1308
1318 ``m`` is an object conforming to ``imanifestdict``.
1309 ``m`` is an object conforming to ``imanifestdict``.
1319
1310
1320 ``link`` is the linkrev revision number.
1311 ``link`` is the linkrev revision number.
1321
1312
1322 ``p1`` and ``p2`` are the parent revision numbers.
1313 ``p1`` and ``p2`` are the parent revision numbers.
1323
1314
1324 ``added`` and ``removed`` are iterables of added and removed paths,
1315 ``added`` and ``removed`` are iterables of added and removed paths,
1325 respectively.
1316 respectively.
1326
1317
1327 ``readtree`` is a function that can be used to read the child tree(s)
1318 ``readtree`` is a function that can be used to read the child tree(s)
1328 when recursively writing the full tree structure when using
1319 when recursively writing the full tree structure when using
1329 treemanifets.
1320 treemanifets.
1330
1321
1331 ``match`` is a matcher that can be used to hint to storage that not all
1322 ``match`` is a matcher that can be used to hint to storage that not all
1332 paths must be inspected; this is an optimization and can be safely
1323 paths must be inspected; this is an optimization and can be safely
1333 ignored. Note that the storage must still be able to reproduce a full
1324 ignored. Note that the storage must still be able to reproduce a full
1334 manifest including files that did not match.
1325 manifest including files that did not match.
1335 """
1326 """
1336
1327
1337 def storageinfo(
1328 def storageinfo(
1338 exclusivefiles=False,
1329 exclusivefiles=False,
1339 sharedfiles=False,
1330 sharedfiles=False,
1340 revisionscount=False,
1331 revisionscount=False,
1341 trackedsize=False,
1332 trackedsize=False,
1342 storedsize=False,
1333 storedsize=False,
1343 ):
1334 ):
1344 """Obtain information about storage for this manifest's data.
1335 """Obtain information about storage for this manifest's data.
1345
1336
1346 See ``ifilestorage.storageinfo()`` for a description of this method.
1337 See ``ifilestorage.storageinfo()`` for a description of this method.
1347 This one behaves the same way, except for manifest data.
1338 This one behaves the same way, except for manifest data.
1348 """
1339 """
1349
1340
1350
1341
1351 class imanifestlog(interfaceutil.Interface):
1342 class imanifestlog(interfaceutil.Interface):
1352 """Interface representing a collection of manifest snapshots.
1343 """Interface representing a collection of manifest snapshots.
1353
1344
1354 Represents the root manifest in a repository.
1345 Represents the root manifest in a repository.
1355
1346
1356 Also serves as a means to access nested tree manifests and to cache
1347 Also serves as a means to access nested tree manifests and to cache
1357 tree manifests.
1348 tree manifests.
1358 """
1349 """
1359
1350
1360 def __getitem__(node):
1351 def __getitem__(node):
1361 """Obtain a manifest instance for a given binary node.
1352 """Obtain a manifest instance for a given binary node.
1362
1353
1363 Equivalent to calling ``self.get('', node)``.
1354 Equivalent to calling ``self.get('', node)``.
1364
1355
1365 The returned object conforms to the ``imanifestrevisionstored``
1356 The returned object conforms to the ``imanifestrevisionstored``
1366 interface.
1357 interface.
1367 """
1358 """
1368
1359
1369 def get(tree, node, verify=True):
1360 def get(tree, node, verify=True):
1370 """Retrieve the manifest instance for a given directory and binary node.
1361 """Retrieve the manifest instance for a given directory and binary node.
1371
1362
1372 ``node`` always refers to the node of the root manifest (which will be
1363 ``node`` always refers to the node of the root manifest (which will be
1373 the only manifest if flat manifests are being used).
1364 the only manifest if flat manifests are being used).
1374
1365
1375 If ``tree`` is the empty string, the root manifest is returned.
1366 If ``tree`` is the empty string, the root manifest is returned.
1376 Otherwise the manifest for the specified directory will be returned
1367 Otherwise the manifest for the specified directory will be returned
1377 (requires tree manifests).
1368 (requires tree manifests).
1378
1369
1379 If ``verify`` is True, ``LookupError`` is raised if the node is not
1370 If ``verify`` is True, ``LookupError`` is raised if the node is not
1380 known.
1371 known.
1381
1372
1382 The returned object conforms to the ``imanifestrevisionstored``
1373 The returned object conforms to the ``imanifestrevisionstored``
1383 interface.
1374 interface.
1384 """
1375 """
1385
1376
1386 def getstorage(tree):
1377 def getstorage(tree):
1387 """Retrieve an interface to storage for a particular tree.
1378 """Retrieve an interface to storage for a particular tree.
1388
1379
1389 If ``tree`` is the empty bytestring, storage for the root manifest will
1380 If ``tree`` is the empty bytestring, storage for the root manifest will
1390 be returned. Otherwise storage for a tree manifest is returned.
1381 be returned. Otherwise storage for a tree manifest is returned.
1391
1382
1392 TODO formalize interface for returned object.
1383 TODO formalize interface for returned object.
1393 """
1384 """
1394
1385
1395 def clearcaches():
1386 def clearcaches():
1396 """Clear caches associated with this collection."""
1387 """Clear caches associated with this collection."""
1397
1388
1398 def rev(node):
1389 def rev(node):
1399 """Obtain the revision number for a binary node.
1390 """Obtain the revision number for a binary node.
1400
1391
1401 Raises ``error.LookupError`` if the node is not known.
1392 Raises ``error.LookupError`` if the node is not known.
1402 """
1393 """
1403
1394
1404
1395
1405 class ilocalrepositoryfilestorage(interfaceutil.Interface):
1396 class ilocalrepositoryfilestorage(interfaceutil.Interface):
1406 """Local repository sub-interface providing access to tracked file storage.
1397 """Local repository sub-interface providing access to tracked file storage.
1407
1398
1408 This interface defines how a repository accesses storage for a single
1399 This interface defines how a repository accesses storage for a single
1409 tracked file path.
1400 tracked file path.
1410 """
1401 """
1411
1402
1412 def file(f):
1403 def file(f):
1413 """Obtain a filelog for a tracked path.
1404 """Obtain a filelog for a tracked path.
1414
1405
1415 The returned type conforms to the ``ifilestorage`` interface.
1406 The returned type conforms to the ``ifilestorage`` interface.
1416 """
1407 """
1417
1408
1418
1409
1419 class ilocalrepositorymain(interfaceutil.Interface):
1410 class ilocalrepositorymain(interfaceutil.Interface):
1420 """Main interface for local repositories.
1411 """Main interface for local repositories.
1421
1412
1422 This currently captures the reality of things - not how things should be.
1413 This currently captures the reality of things - not how things should be.
1423 """
1414 """
1424
1415
1425 supportedformats = interfaceutil.Attribute(
1416 supportedformats = interfaceutil.Attribute(
1426 """Set of requirements that apply to stream clone.
1417 """Set of requirements that apply to stream clone.
1427
1418
1428 This is actually a class attribute and is shared among all instances.
1419 This is actually a class attribute and is shared among all instances.
1429 """
1420 """
1430 )
1421 )
1431
1422
1432 supported = interfaceutil.Attribute(
1423 supported = interfaceutil.Attribute(
1433 """Set of requirements that this repo is capable of opening."""
1424 """Set of requirements that this repo is capable of opening."""
1434 )
1425 )
1435
1426
1436 requirements = interfaceutil.Attribute(
1427 requirements = interfaceutil.Attribute(
1437 """Set of requirements this repo uses."""
1428 """Set of requirements this repo uses."""
1438 )
1429 )
1439
1430
1440 features = interfaceutil.Attribute(
1431 features = interfaceutil.Attribute(
1441 """Set of "features" this repository supports.
1432 """Set of "features" this repository supports.
1442
1433
1443 A "feature" is a loosely-defined term. It can refer to a feature
1434 A "feature" is a loosely-defined term. It can refer to a feature
1444 in the classical sense or can describe an implementation detail
1435 in the classical sense or can describe an implementation detail
1445 of the repository. For example, a ``readonly`` feature may denote
1436 of the repository. For example, a ``readonly`` feature may denote
1446 the repository as read-only. Or a ``revlogfilestore`` feature may
1437 the repository as read-only. Or a ``revlogfilestore`` feature may
1447 denote that the repository is using revlogs for file storage.
1438 denote that the repository is using revlogs for file storage.
1448
1439
1449 The intent of features is to provide a machine-queryable mechanism
1440 The intent of features is to provide a machine-queryable mechanism
1450 for repo consumers to test for various repository characteristics.
1441 for repo consumers to test for various repository characteristics.
1451
1442
1452 Features are similar to ``requirements``. The main difference is that
1443 Features are similar to ``requirements``. The main difference is that
1453 requirements are stored on-disk and represent requirements to open the
1444 requirements are stored on-disk and represent requirements to open the
1454 repository. Features are more run-time capabilities of the repository
1445 repository. Features are more run-time capabilities of the repository
1455 and more granular capabilities (which may be derived from requirements).
1446 and more granular capabilities (which may be derived from requirements).
1456 """
1447 """
1457 )
1448 )
1458
1449
1459 filtername = interfaceutil.Attribute(
1450 filtername = interfaceutil.Attribute(
1460 """Name of the repoview that is active on this repo."""
1451 """Name of the repoview that is active on this repo."""
1461 )
1452 )
1462
1453
1463 wvfs = interfaceutil.Attribute(
1454 wvfs = interfaceutil.Attribute(
1464 """VFS used to access the working directory."""
1455 """VFS used to access the working directory."""
1465 )
1456 )
1466
1457
1467 vfs = interfaceutil.Attribute(
1458 vfs = interfaceutil.Attribute(
1468 """VFS rooted at the .hg directory.
1459 """VFS rooted at the .hg directory.
1469
1460
1470 Used to access repository data not in the store.
1461 Used to access repository data not in the store.
1471 """
1462 """
1472 )
1463 )
1473
1464
1474 svfs = interfaceutil.Attribute(
1465 svfs = interfaceutil.Attribute(
1475 """VFS rooted at the store.
1466 """VFS rooted at the store.
1476
1467
1477 Used to access repository data in the store. Typically .hg/store.
1468 Used to access repository data in the store. Typically .hg/store.
1478 But can point elsewhere if the store is shared.
1469 But can point elsewhere if the store is shared.
1479 """
1470 """
1480 )
1471 )
1481
1472
1482 root = interfaceutil.Attribute(
1473 root = interfaceutil.Attribute(
1483 """Path to the root of the working directory."""
1474 """Path to the root of the working directory."""
1484 )
1475 )
1485
1476
1486 path = interfaceutil.Attribute("""Path to the .hg directory.""")
1477 path = interfaceutil.Attribute("""Path to the .hg directory.""")
1487
1478
1488 origroot = interfaceutil.Attribute(
1479 origroot = interfaceutil.Attribute(
1489 """The filesystem path that was used to construct the repo."""
1480 """The filesystem path that was used to construct the repo."""
1490 )
1481 )
1491
1482
1492 auditor = interfaceutil.Attribute(
1483 auditor = interfaceutil.Attribute(
1493 """A pathauditor for the working directory.
1484 """A pathauditor for the working directory.
1494
1485
1495 This checks if a path refers to a nested repository.
1486 This checks if a path refers to a nested repository.
1496
1487
1497 Operates on the filesystem.
1488 Operates on the filesystem.
1498 """
1489 """
1499 )
1490 )
1500
1491
1501 nofsauditor = interfaceutil.Attribute(
1492 nofsauditor = interfaceutil.Attribute(
1502 """A pathauditor for the working directory.
1493 """A pathauditor for the working directory.
1503
1494
1504 This is like ``auditor`` except it doesn't do filesystem checks.
1495 This is like ``auditor`` except it doesn't do filesystem checks.
1505 """
1496 """
1506 )
1497 )
1507
1498
1508 baseui = interfaceutil.Attribute(
1499 baseui = interfaceutil.Attribute(
1509 """Original ui instance passed into constructor."""
1500 """Original ui instance passed into constructor."""
1510 )
1501 )
1511
1502
1512 ui = interfaceutil.Attribute("""Main ui instance for this instance.""")
1503 ui = interfaceutil.Attribute("""Main ui instance for this instance.""")
1513
1504
1514 sharedpath = interfaceutil.Attribute(
1505 sharedpath = interfaceutil.Attribute(
1515 """Path to the .hg directory of the repo this repo was shared from."""
1506 """Path to the .hg directory of the repo this repo was shared from."""
1516 )
1507 )
1517
1508
1518 store = interfaceutil.Attribute("""A store instance.""")
1509 store = interfaceutil.Attribute("""A store instance.""")
1519
1510
1520 spath = interfaceutil.Attribute("""Path to the store.""")
1511 spath = interfaceutil.Attribute("""Path to the store.""")
1521
1512
1522 sjoin = interfaceutil.Attribute("""Alias to self.store.join.""")
1513 sjoin = interfaceutil.Attribute("""Alias to self.store.join.""")
1523
1514
1524 cachevfs = interfaceutil.Attribute(
1515 cachevfs = interfaceutil.Attribute(
1525 """A VFS used to access the cache directory.
1516 """A VFS used to access the cache directory.
1526
1517
1527 Typically .hg/cache.
1518 Typically .hg/cache.
1528 """
1519 """
1529 )
1520 )
1530
1521
1531 wcachevfs = interfaceutil.Attribute(
1522 wcachevfs = interfaceutil.Attribute(
1532 """A VFS used to access the cache directory dedicated to working copy
1523 """A VFS used to access the cache directory dedicated to working copy
1533
1524
1534 Typically .hg/wcache.
1525 Typically .hg/wcache.
1535 """
1526 """
1536 )
1527 )
1537
1528
1538 filteredrevcache = interfaceutil.Attribute(
1529 filteredrevcache = interfaceutil.Attribute(
1539 """Holds sets of revisions to be filtered."""
1530 """Holds sets of revisions to be filtered."""
1540 )
1531 )
1541
1532
1542 names = interfaceutil.Attribute("""A ``namespaces`` instance.""")
1533 names = interfaceutil.Attribute("""A ``namespaces`` instance.""")
1543
1534
1544 filecopiesmode = interfaceutil.Attribute(
1535 filecopiesmode = interfaceutil.Attribute(
1545 """The way files copies should be dealt with in this repo."""
1536 """The way files copies should be dealt with in this repo."""
1546 )
1537 )
1547
1538
1548 def close():
1539 def close():
1549 """Close the handle on this repository."""
1540 """Close the handle on this repository."""
1550
1541
1551 def peer():
1542 def peer():
1552 """Obtain an object conforming to the ``peer`` interface."""
1543 """Obtain an object conforming to the ``peer`` interface."""
1553
1544
1554 def unfiltered():
1545 def unfiltered():
1555 """Obtain an unfiltered/raw view of this repo."""
1546 """Obtain an unfiltered/raw view of this repo."""
1556
1547
1557 def filtered(name, visibilityexceptions=None):
1548 def filtered(name, visibilityexceptions=None):
1558 """Obtain a named view of this repository."""
1549 """Obtain a named view of this repository."""
1559
1550
1560 obsstore = interfaceutil.Attribute("""A store of obsolescence data.""")
1551 obsstore = interfaceutil.Attribute("""A store of obsolescence data.""")
1561
1552
1562 changelog = interfaceutil.Attribute("""A handle on the changelog revlog.""")
1553 changelog = interfaceutil.Attribute("""A handle on the changelog revlog.""")
1563
1554
1564 manifestlog = interfaceutil.Attribute(
1555 manifestlog = interfaceutil.Attribute(
1565 """An instance conforming to the ``imanifestlog`` interface.
1556 """An instance conforming to the ``imanifestlog`` interface.
1566
1557
1567 Provides access to manifests for the repository.
1558 Provides access to manifests for the repository.
1568 """
1559 """
1569 )
1560 )
1570
1561
1571 dirstate = interfaceutil.Attribute("""Working directory state.""")
1562 dirstate = interfaceutil.Attribute("""Working directory state.""")
1572
1563
1573 narrowpats = interfaceutil.Attribute(
1564 narrowpats = interfaceutil.Attribute(
1574 """Matcher patterns for this repository's narrowspec."""
1565 """Matcher patterns for this repository's narrowspec."""
1575 )
1566 )
1576
1567
1577 def narrowmatch(match=None, includeexact=False):
1568 def narrowmatch(match=None, includeexact=False):
1578 """Obtain a matcher for the narrowspec."""
1569 """Obtain a matcher for the narrowspec."""
1579
1570
1580 def setnarrowpats(newincludes, newexcludes):
1571 def setnarrowpats(newincludes, newexcludes):
1581 """Define the narrowspec for this repository."""
1572 """Define the narrowspec for this repository."""
1582
1573
1583 def __getitem__(changeid):
1574 def __getitem__(changeid):
1584 """Try to resolve a changectx."""
1575 """Try to resolve a changectx."""
1585
1576
1586 def __contains__(changeid):
1577 def __contains__(changeid):
1587 """Whether a changeset exists."""
1578 """Whether a changeset exists."""
1588
1579
1589 def __nonzero__():
1580 def __nonzero__():
1590 """Always returns True."""
1581 """Always returns True."""
1591 return True
1582 return True
1592
1583
1593 __bool__ = __nonzero__
1584 __bool__ = __nonzero__
1594
1585
1595 def __len__():
1586 def __len__():
1596 """Returns the number of changesets in the repo."""
1587 """Returns the number of changesets in the repo."""
1597
1588
1598 def __iter__():
1589 def __iter__():
1599 """Iterate over revisions in the changelog."""
1590 """Iterate over revisions in the changelog."""
1600
1591
1601 def revs(expr, *args):
1592 def revs(expr, *args):
1602 """Evaluate a revset.
1593 """Evaluate a revset.
1603
1594
1604 Emits revisions.
1595 Emits revisions.
1605 """
1596 """
1606
1597
1607 def set(expr, *args):
1598 def set(expr, *args):
1608 """Evaluate a revset.
1599 """Evaluate a revset.
1609
1600
1610 Emits changectx instances.
1601 Emits changectx instances.
1611 """
1602 """
1612
1603
1613 def anyrevs(specs, user=False, localalias=None):
1604 def anyrevs(specs, user=False, localalias=None):
1614 """Find revisions matching one of the given revsets."""
1605 """Find revisions matching one of the given revsets."""
1615
1606
1616 def url():
1607 def url():
1617 """Returns a string representing the location of this repo."""
1608 """Returns a string representing the location of this repo."""
1618
1609
1619 def hook(name, throw=False, **args):
1610 def hook(name, throw=False, **args):
1620 """Call a hook."""
1611 """Call a hook."""
1621
1612
1622 def tags():
1613 def tags():
1623 """Return a mapping of tag to node."""
1614 """Return a mapping of tag to node."""
1624
1615
1625 def tagtype(tagname):
1616 def tagtype(tagname):
1626 """Return the type of a given tag."""
1617 """Return the type of a given tag."""
1627
1618
1628 def tagslist():
1619 def tagslist():
1629 """Return a list of tags ordered by revision."""
1620 """Return a list of tags ordered by revision."""
1630
1621
1631 def nodetags(node):
1622 def nodetags(node):
1632 """Return the tags associated with a node."""
1623 """Return the tags associated with a node."""
1633
1624
1634 def nodebookmarks(node):
1625 def nodebookmarks(node):
1635 """Return the list of bookmarks pointing to the specified node."""
1626 """Return the list of bookmarks pointing to the specified node."""
1636
1627
1637 def branchmap():
1628 def branchmap():
1638 """Return a mapping of branch to heads in that branch."""
1629 """Return a mapping of branch to heads in that branch."""
1639
1630
1640 def revbranchcache():
1631 def revbranchcache():
1641 pass
1632 pass
1642
1633
1643 def branchtip(branchtip, ignoremissing=False):
1634 def branchtip(branchtip, ignoremissing=False):
1644 """Return the tip node for a given branch."""
1635 """Return the tip node for a given branch."""
1645
1636
1646 def lookup(key):
1637 def lookup(key):
1647 """Resolve the node for a revision."""
1638 """Resolve the node for a revision."""
1648
1639
1649 def lookupbranch(key):
1640 def lookupbranch(key):
1650 """Look up the branch name of the given revision or branch name."""
1641 """Look up the branch name of the given revision or branch name."""
1651
1642
1652 def known(nodes):
1643 def known(nodes):
1653 """Determine whether a series of nodes is known.
1644 """Determine whether a series of nodes is known.
1654
1645
1655 Returns a list of bools.
1646 Returns a list of bools.
1656 """
1647 """
1657
1648
1658 def local():
1649 def local():
1659 """Whether the repository is local."""
1650 """Whether the repository is local."""
1660 return True
1651 return True
1661
1652
1662 def publishing():
1653 def publishing():
1663 """Whether the repository is a publishing repository."""
1654 """Whether the repository is a publishing repository."""
1664
1655
1665 def cancopy():
1656 def cancopy():
1666 pass
1657 pass
1667
1658
1668 def shared():
1659 def shared():
1669 """The type of shared repository or None."""
1660 """The type of shared repository or None."""
1670
1661
1671 def wjoin(f, *insidef):
1662 def wjoin(f, *insidef):
1672 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
1663 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
1673
1664
1674 def setparents(p1, p2):
1665 def setparents(p1, p2):
1675 """Set the parent nodes of the working directory."""
1666 """Set the parent nodes of the working directory."""
1676
1667
1677 def filectx(path, changeid=None, fileid=None):
1668 def filectx(path, changeid=None, fileid=None):
1678 """Obtain a filectx for the given file revision."""
1669 """Obtain a filectx for the given file revision."""
1679
1670
1680 def getcwd():
1671 def getcwd():
1681 """Obtain the current working directory from the dirstate."""
1672 """Obtain the current working directory from the dirstate."""
1682
1673
1683 def pathto(f, cwd=None):
1674 def pathto(f, cwd=None):
1684 """Obtain the relative path to a file."""
1675 """Obtain the relative path to a file."""
1685
1676
1686 def adddatafilter(name, fltr):
1677 def adddatafilter(name, fltr):
1687 pass
1678 pass
1688
1679
1689 def wread(filename):
1680 def wread(filename):
1690 """Read a file from wvfs, using data filters."""
1681 """Read a file from wvfs, using data filters."""
1691
1682
1692 def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
1683 def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
1693 """Write data to a file in the wvfs, using data filters."""
1684 """Write data to a file in the wvfs, using data filters."""
1694
1685
1695 def wwritedata(filename, data):
1686 def wwritedata(filename, data):
1696 """Resolve data for writing to the wvfs, using data filters."""
1687 """Resolve data for writing to the wvfs, using data filters."""
1697
1688
1698 def currenttransaction():
1689 def currenttransaction():
1699 """Obtain the current transaction instance or None."""
1690 """Obtain the current transaction instance or None."""
1700
1691
1701 def transaction(desc, report=None):
1692 def transaction(desc, report=None):
1702 """Open a new transaction to write to the repository."""
1693 """Open a new transaction to write to the repository."""
1703
1694
1704 def undofiles():
1695 def undofiles():
1705 """Returns a list of (vfs, path) for files to undo transactions."""
1696 """Returns a list of (vfs, path) for files to undo transactions."""
1706
1697
1707 def recover():
1698 def recover():
1708 """Roll back an interrupted transaction."""
1699 """Roll back an interrupted transaction."""
1709
1700
1710 def rollback(dryrun=False, force=False):
1701 def rollback(dryrun=False, force=False):
1711 """Undo the last transaction.
1702 """Undo the last transaction.
1712
1703
1713 DANGEROUS.
1704 DANGEROUS.
1714 """
1705 """
1715
1706
1716 def updatecaches(tr=None, full=False):
1707 def updatecaches(tr=None, full=False):
1717 """Warm repo caches."""
1708 """Warm repo caches."""
1718
1709
1719 def invalidatecaches():
1710 def invalidatecaches():
1720 """Invalidate cached data due to the repository mutating."""
1711 """Invalidate cached data due to the repository mutating."""
1721
1712
1722 def invalidatevolatilesets():
1713 def invalidatevolatilesets():
1723 pass
1714 pass
1724
1715
1725 def invalidatedirstate():
1716 def invalidatedirstate():
1726 """Invalidate the dirstate."""
1717 """Invalidate the dirstate."""
1727
1718
1728 def invalidate(clearfilecache=False):
1719 def invalidate(clearfilecache=False):
1729 pass
1720 pass
1730
1721
1731 def invalidateall():
1722 def invalidateall():
1732 pass
1723 pass
1733
1724
1734 def lock(wait=True):
1725 def lock(wait=True):
1735 """Lock the repository store and return a lock instance."""
1726 """Lock the repository store and return a lock instance."""
1736
1727
1737 def wlock(wait=True):
1728 def wlock(wait=True):
1738 """Lock the non-store parts of the repository."""
1729 """Lock the non-store parts of the repository."""
1739
1730
1740 def currentwlock():
1731 def currentwlock():
1741 """Return the wlock if it's held or None."""
1732 """Return the wlock if it's held or None."""
1742
1733
1743 def checkcommitpatterns(wctx, match, status, fail):
1734 def checkcommitpatterns(wctx, match, status, fail):
1744 pass
1735 pass
1745
1736
1746 def commit(
1737 def commit(
1747 text=b'',
1738 text=b'',
1748 user=None,
1739 user=None,
1749 date=None,
1740 date=None,
1750 match=None,
1741 match=None,
1751 force=False,
1742 force=False,
1752 editor=False,
1743 editor=False,
1753 extra=None,
1744 extra=None,
1754 ):
1745 ):
1755 """Add a new revision to the repository."""
1746 """Add a new revision to the repository."""
1756
1747
1757 def commitctx(ctx, error=False, origctx=None):
1748 def commitctx(ctx, error=False, origctx=None):
1758 """Commit a commitctx instance to the repository."""
1749 """Commit a commitctx instance to the repository."""
1759
1750
1760 def destroying():
1751 def destroying():
1761 """Inform the repository that nodes are about to be destroyed."""
1752 """Inform the repository that nodes are about to be destroyed."""
1762
1753
1763 def destroyed():
1754 def destroyed():
1764 """Inform the repository that nodes have been destroyed."""
1755 """Inform the repository that nodes have been destroyed."""
1765
1756
1766 def status(
1757 def status(
1767 node1=b'.',
1758 node1=b'.',
1768 node2=None,
1759 node2=None,
1769 match=None,
1760 match=None,
1770 ignored=False,
1761 ignored=False,
1771 clean=False,
1762 clean=False,
1772 unknown=False,
1763 unknown=False,
1773 listsubrepos=False,
1764 listsubrepos=False,
1774 ):
1765 ):
1775 """Convenience method to call repo[x].status()."""
1766 """Convenience method to call repo[x].status()."""
1776
1767
1777 def addpostdsstatus(ps):
1768 def addpostdsstatus(ps):
1778 pass
1769 pass
1779
1770
1780 def postdsstatus():
1771 def postdsstatus():
1781 pass
1772 pass
1782
1773
1783 def clearpostdsstatus():
1774 def clearpostdsstatus():
1784 pass
1775 pass
1785
1776
1786 def heads(start=None):
1777 def heads(start=None):
1787 """Obtain list of nodes that are DAG heads."""
1778 """Obtain list of nodes that are DAG heads."""
1788
1779
1789 def branchheads(branch=None, start=None, closed=False):
1780 def branchheads(branch=None, start=None, closed=False):
1790 pass
1781 pass
1791
1782
1792 def branches(nodes):
1783 def branches(nodes):
1793 pass
1784 pass
1794
1785
1795 def between(pairs):
1786 def between(pairs):
1796 pass
1787 pass
1797
1788
1798 def checkpush(pushop):
1789 def checkpush(pushop):
1799 pass
1790 pass
1800
1791
1801 prepushoutgoinghooks = interfaceutil.Attribute("""util.hooks instance.""")
1792 prepushoutgoinghooks = interfaceutil.Attribute("""util.hooks instance.""")
1802
1793
1803 def pushkey(namespace, key, old, new):
1794 def pushkey(namespace, key, old, new):
1804 pass
1795 pass
1805
1796
1806 def listkeys(namespace):
1797 def listkeys(namespace):
1807 pass
1798 pass
1808
1799
1809 def debugwireargs(one, two, three=None, four=None, five=None):
1800 def debugwireargs(one, two, three=None, four=None, five=None):
1810 pass
1801 pass
1811
1802
1812 def savecommitmessage(text):
1803 def savecommitmessage(text):
1813 pass
1804 pass
1814
1805
1815
1806
1816 class completelocalrepository(
1807 class completelocalrepository(
1817 ilocalrepositorymain, ilocalrepositoryfilestorage
1808 ilocalrepositorymain, ilocalrepositoryfilestorage
1818 ):
1809 ):
1819 """Complete interface for a local repository."""
1810 """Complete interface for a local repository."""
1820
1811
1821
1812
1822 class iwireprotocolcommandcacher(interfaceutil.Interface):
1813 class iwireprotocolcommandcacher(interfaceutil.Interface):
1823 """Represents a caching backend for wire protocol commands.
1814 """Represents a caching backend for wire protocol commands.
1824
1815
1825 Wire protocol version 2 supports transparent caching of many commands.
1816 Wire protocol version 2 supports transparent caching of many commands.
1826 To leverage this caching, servers can activate objects that cache
1817 To leverage this caching, servers can activate objects that cache
1827 command responses. Objects handle both cache writing and reading.
1818 command responses. Objects handle both cache writing and reading.
1828 This interface defines how that response caching mechanism works.
1819 This interface defines how that response caching mechanism works.
1829
1820
1830 Wire protocol version 2 commands emit a series of objects that are
1821 Wire protocol version 2 commands emit a series of objects that are
1831 serialized and sent to the client. The caching layer exists between
1822 serialized and sent to the client. The caching layer exists between
1832 the invocation of the command function and the sending of its output
1823 the invocation of the command function and the sending of its output
1833 objects to an output layer.
1824 objects to an output layer.
1834
1825
1835 Instances of this interface represent a binding to a cache that
1826 Instances of this interface represent a binding to a cache that
1836 can serve a response (in place of calling a command function) and/or
1827 can serve a response (in place of calling a command function) and/or
1837 write responses to a cache for subsequent use.
1828 write responses to a cache for subsequent use.
1838
1829
1839 When a command request arrives, the following happens with regards
1830 When a command request arrives, the following happens with regards
1840 to this interface:
1831 to this interface:
1841
1832
1842 1. The server determines whether the command request is cacheable.
1833 1. The server determines whether the command request is cacheable.
1843 2. If it is, an instance of this interface is spawned.
1834 2. If it is, an instance of this interface is spawned.
1844 3. The cacher is activated in a context manager (``__enter__`` is called).
1835 3. The cacher is activated in a context manager (``__enter__`` is called).
1845 4. A cache *key* for that request is derived. This will call the
1836 4. A cache *key* for that request is derived. This will call the
1846 instance's ``adjustcachekeystate()`` method so the derivation
1837 instance's ``adjustcachekeystate()`` method so the derivation
1847 can be influenced.
1838 can be influenced.
1848 5. The cacher is informed of the derived cache key via a call to
1839 5. The cacher is informed of the derived cache key via a call to
1849 ``setcachekey()``.
1840 ``setcachekey()``.
1850 6. The cacher's ``lookup()`` method is called to test for presence of
1841 6. The cacher's ``lookup()`` method is called to test for presence of
1851 the derived key in the cache.
1842 the derived key in the cache.
1852 7. If ``lookup()`` returns a hit, that cached result is used in place
1843 7. If ``lookup()`` returns a hit, that cached result is used in place
1853 of invoking the command function. ``__exit__`` is called and the instance
1844 of invoking the command function. ``__exit__`` is called and the instance
1854 is discarded.
1845 is discarded.
1855 8. The command function is invoked.
1846 8. The command function is invoked.
1856 9. ``onobject()`` is called for each object emitted by the command
1847 9. ``onobject()`` is called for each object emitted by the command
1857 function.
1848 function.
1858 10. After the final object is seen, ``onfinished()`` is called.
1849 10. After the final object is seen, ``onfinished()`` is called.
1859 11. ``__exit__`` is called to signal the end of use of the instance.
1850 11. ``__exit__`` is called to signal the end of use of the instance.
1860
1851
1861 Cache *key* derivation can be influenced by the instance.
1852 Cache *key* derivation can be influenced by the instance.
1862
1853
1863 Cache keys are initially derived by a deterministic representation of
1854 Cache keys are initially derived by a deterministic representation of
1864 the command request. This includes the command name, arguments, protocol
1855 the command request. This includes the command name, arguments, protocol
1865 version, etc. This initial key derivation is performed by CBOR-encoding a
1856 version, etc. This initial key derivation is performed by CBOR-encoding a
1866 data structure and feeding that output into a hasher.
1857 data structure and feeding that output into a hasher.
1867
1858
1868 Instances of this interface can influence this initial key derivation
1859 Instances of this interface can influence this initial key derivation
1869 via ``adjustcachekeystate()``.
1860 via ``adjustcachekeystate()``.
1870
1861
1871 The instance is informed of the derived cache key via a call to
1862 The instance is informed of the derived cache key via a call to
1872 ``setcachekey()``. The instance must store the key locally so it can
1863 ``setcachekey()``. The instance must store the key locally so it can
1873 be consulted on subsequent operations that may require it.
1864 be consulted on subsequent operations that may require it.
1874
1865
1875 When constructed, the instance has access to a callable that can be used
1866 When constructed, the instance has access to a callable that can be used
1876 for encoding response objects. This callable receives as its single
1867 for encoding response objects. This callable receives as its single
1877 argument an object emitted by a command function. It returns an iterable
1868 argument an object emitted by a command function. It returns an iterable
1878 of bytes chunks representing the encoded object. Unless the cacher is
1869 of bytes chunks representing the encoded object. Unless the cacher is
1879 caching native Python objects in memory or has a way of reconstructing
1870 caching native Python objects in memory or has a way of reconstructing
1880 the original Python objects, implementations typically call this function
1871 the original Python objects, implementations typically call this function
1881 to produce bytes from the output objects and then store those bytes in
1872 to produce bytes from the output objects and then store those bytes in
1882 the cache. When it comes time to re-emit those bytes, they are wrapped
1873 the cache. When it comes time to re-emit those bytes, they are wrapped
1883 in a ``wireprototypes.encodedresponse`` instance to tell the output
1874 in a ``wireprototypes.encodedresponse`` instance to tell the output
1884 layer that they are pre-encoded.
1875 layer that they are pre-encoded.
1885
1876
1886 When receiving the objects emitted by the command function, instances
1877 When receiving the objects emitted by the command function, instances
1887 can choose what to do with those objects. The simplest thing to do is
1878 can choose what to do with those objects. The simplest thing to do is
1888 re-emit the original objects. They will be forwarded to the output
1879 re-emit the original objects. They will be forwarded to the output
1889 layer and will be processed as if the cacher did not exist.
1880 layer and will be processed as if the cacher did not exist.
1890
1881
1891 Implementations could also choose to not emit objects - instead locally
1882 Implementations could also choose to not emit objects - instead locally
1892 buffering objects or their encoded representation. They could then emit
1883 buffering objects or their encoded representation. They could then emit
1893 a single "coalesced" object when ``onfinished()`` is called. In
1884 a single "coalesced" object when ``onfinished()`` is called. In
1894 this way, the implementation would function as a filtering layer of
1885 this way, the implementation would function as a filtering layer of
1895 sorts.
1886 sorts.
1896
1887
1897 When caching objects, typically the encoded form of the object will
1888 When caching objects, typically the encoded form of the object will
1898 be stored. Keep in mind that if the original object is forwarded to
1889 be stored. Keep in mind that if the original object is forwarded to
1899 the output layer, it will need to be encoded there as well. For large
1890 the output layer, it will need to be encoded there as well. For large
1900 output, this redundant encoding could add overhead. Implementations
1891 output, this redundant encoding could add overhead. Implementations
1901 could wrap the encoded object data in ``wireprototypes.encodedresponse``
1892 could wrap the encoded object data in ``wireprototypes.encodedresponse``
1902 instances to avoid this overhead.
1893 instances to avoid this overhead.
1903 """
1894 """
1904
1895
1905 def __enter__():
1896 def __enter__():
1906 """Marks the instance as active.
1897 """Marks the instance as active.
1907
1898
1908 Should return self.
1899 Should return self.
1909 """
1900 """
1910
1901
1911 def __exit__(exctype, excvalue, exctb):
1902 def __exit__(exctype, excvalue, exctb):
1912 """Called when cacher is no longer used.
1903 """Called when cacher is no longer used.
1913
1904
1914 This can be used by implementations to perform cleanup actions (e.g.
1905 This can be used by implementations to perform cleanup actions (e.g.
1915 disconnecting network sockets, aborting a partially cached response.
1906 disconnecting network sockets, aborting a partially cached response.
1916 """
1907 """
1917
1908
1918 def adjustcachekeystate(state):
1909 def adjustcachekeystate(state):
1919 """Influences cache key derivation by adjusting state to derive key.
1910 """Influences cache key derivation by adjusting state to derive key.
1920
1911
1921 A dict defining the state used to derive the cache key is passed.
1912 A dict defining the state used to derive the cache key is passed.
1922
1913
1923 Implementations can modify this dict to record additional state that
1914 Implementations can modify this dict to record additional state that
1924 is wanted to influence key derivation.
1915 is wanted to influence key derivation.
1925
1916
1926 Implementations are *highly* encouraged to not modify or delete
1917 Implementations are *highly* encouraged to not modify or delete
1927 existing keys.
1918 existing keys.
1928 """
1919 """
1929
1920
1930 def setcachekey(key):
1921 def setcachekey(key):
1931 """Record the derived cache key for this request.
1922 """Record the derived cache key for this request.
1932
1923
1933 Instances may mutate the key for internal usage, as desired. e.g.
1924 Instances may mutate the key for internal usage, as desired. e.g.
1934 instances may wish to prepend the repo name, introduce path
1925 instances may wish to prepend the repo name, introduce path
1935 components for filesystem or URL addressing, etc. Behavior is up to
1926 components for filesystem or URL addressing, etc. Behavior is up to
1936 the cache.
1927 the cache.
1937
1928
1938 Returns a bool indicating if the request is cacheable by this
1929 Returns a bool indicating if the request is cacheable by this
1939 instance.
1930 instance.
1940 """
1931 """
1941
1932
1942 def lookup():
1933 def lookup():
1943 """Attempt to resolve an entry in the cache.
1934 """Attempt to resolve an entry in the cache.
1944
1935
1945 The instance is instructed to look for the cache key that it was
1936 The instance is instructed to look for the cache key that it was
1946 informed about via the call to ``setcachekey()``.
1937 informed about via the call to ``setcachekey()``.
1947
1938
1948 If there's no cache hit or the cacher doesn't wish to use the cached
1939 If there's no cache hit or the cacher doesn't wish to use the cached
1949 entry, ``None`` should be returned.
1940 entry, ``None`` should be returned.
1950
1941
1951 Else, a dict defining the cached result should be returned. The
1942 Else, a dict defining the cached result should be returned. The
1952 dict may have the following keys:
1943 dict may have the following keys:
1953
1944
1954 objs
1945 objs
1955 An iterable of objects that should be sent to the client. That
1946 An iterable of objects that should be sent to the client. That
1956 iterable of objects is expected to be what the command function
1947 iterable of objects is expected to be what the command function
1957 would return if invoked or an equivalent representation thereof.
1948 would return if invoked or an equivalent representation thereof.
1958 """
1949 """
1959
1950
1960 def onobject(obj):
1951 def onobject(obj):
1961 """Called when a new object is emitted from the command function.
1952 """Called when a new object is emitted from the command function.
1962
1953
1963 Receives as its argument the object that was emitted from the
1954 Receives as its argument the object that was emitted from the
1964 command function.
1955 command function.
1965
1956
1966 This method returns an iterator of objects to forward to the output
1957 This method returns an iterator of objects to forward to the output
1967 layer. The easiest implementation is a generator that just
1958 layer. The easiest implementation is a generator that just
1968 ``yield obj``.
1959 ``yield obj``.
1969 """
1960 """
1970
1961
1971 def onfinished():
1962 def onfinished():
1972 """Called after all objects have been emitted from the command function.
1963 """Called after all objects have been emitted from the command function.
1973
1964
1974 Implementations should return an iterator of objects to forward to
1965 Implementations should return an iterator of objects to forward to
1975 the output layer.
1966 the output layer.
1976
1967
1977 This method can be a generator.
1968 This method can be a generator.
1978 """
1969 """
@@ -1,2259 +1,2256 b''
1 # manifest.py - manifest revision class for mercurial
1 # manifest.py - manifest revision class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import heapq
10 import heapq
11 import itertools
11 import itertools
12 import struct
12 import struct
13 import weakref
13 import weakref
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 bin,
17 bin,
18 hex,
18 hex,
19 nullid,
19 nullid,
20 nullrev,
20 nullrev,
21 )
21 )
22 from .pycompat import getattr
22 from .pycompat import getattr
23 from . import (
23 from . import (
24 encoding,
24 encoding,
25 error,
25 error,
26 match as matchmod,
26 match as matchmod,
27 mdiff,
27 mdiff,
28 pathutil,
28 pathutil,
29 policy,
29 policy,
30 pycompat,
30 pycompat,
31 revlog,
31 revlog,
32 util,
32 util,
33 )
33 )
34 from .interfaces import (
34 from .interfaces import (
35 repository,
35 repository,
36 util as interfaceutil,
36 util as interfaceutil,
37 )
37 )
38
38
39 parsers = policy.importmod('parsers')
39 parsers = policy.importmod('parsers')
40 propertycache = util.propertycache
40 propertycache = util.propertycache
41
41
42 # Allow tests to more easily test the alternate path in manifestdict.fastdelta()
42 # Allow tests to more easily test the alternate path in manifestdict.fastdelta()
43 FASTDELTA_TEXTDIFF_THRESHOLD = 1000
43 FASTDELTA_TEXTDIFF_THRESHOLD = 1000
44
44
45
45
46 def _parse(data):
46 def _parse(data):
47 # This method does a little bit of excessive-looking
47 # This method does a little bit of excessive-looking
48 # precondition checking. This is so that the behavior of this
48 # precondition checking. This is so that the behavior of this
49 # class exactly matches its C counterpart to try and help
49 # class exactly matches its C counterpart to try and help
50 # prevent surprise breakage for anyone that develops against
50 # prevent surprise breakage for anyone that develops against
51 # the pure version.
51 # the pure version.
52 if data and data[-1:] != b'\n':
52 if data and data[-1:] != b'\n':
53 raise ValueError(b'Manifest did not end in a newline.')
53 raise ValueError(b'Manifest did not end in a newline.')
54 prev = None
54 prev = None
55 for l in data.splitlines():
55 for l in data.splitlines():
56 if prev is not None and prev > l:
56 if prev is not None and prev > l:
57 raise ValueError(b'Manifest lines not in sorted order.')
57 raise ValueError(b'Manifest lines not in sorted order.')
58 prev = l
58 prev = l
59 f, n = l.split(b'\0')
59 f, n = l.split(b'\0')
60 if len(n) > 40:
60 if len(n) > 40:
61 yield f, bin(n[:40]), n[40:]
61 yield f, bin(n[:40]), n[40:]
62 else:
62 else:
63 yield f, bin(n), b''
63 yield f, bin(n), b''
64
64
65
65
66 def _text(it):
66 def _text(it):
67 files = []
67 files = []
68 lines = []
68 lines = []
69 for f, n, fl in it:
69 for f, n, fl in it:
70 files.append(f)
70 files.append(f)
71 # if this is changed to support newlines in filenames,
71 # if this is changed to support newlines in filenames,
72 # be sure to check the templates/ dir again (especially *-raw.tmpl)
72 # be sure to check the templates/ dir again (especially *-raw.tmpl)
73 lines.append(b"%s\0%s%s\n" % (f, hex(n), fl))
73 lines.append(b"%s\0%s%s\n" % (f, hex(n), fl))
74
74
75 _checkforbidden(files)
75 _checkforbidden(files)
76 return b''.join(lines)
76 return b''.join(lines)
77
77
78
78
79 class lazymanifestiter(object):
79 class lazymanifestiter(object):
80 def __init__(self, lm):
80 def __init__(self, lm):
81 self.pos = 0
81 self.pos = 0
82 self.lm = lm
82 self.lm = lm
83
83
84 def __iter__(self):
84 def __iter__(self):
85 return self
85 return self
86
86
87 def next(self):
87 def next(self):
88 try:
88 try:
89 data, pos = self.lm._get(self.pos)
89 data, pos = self.lm._get(self.pos)
90 except IndexError:
90 except IndexError:
91 raise StopIteration
91 raise StopIteration
92 if pos == -1:
92 if pos == -1:
93 self.pos += 1
93 self.pos += 1
94 return data[0]
94 return data[0]
95 self.pos += 1
95 self.pos += 1
96 zeropos = data.find(b'\x00', pos)
96 zeropos = data.find(b'\x00', pos)
97 return data[pos:zeropos]
97 return data[pos:zeropos]
98
98
99 __next__ = next
99 __next__ = next
100
100
101
101
102 class lazymanifestiterentries(object):
102 class lazymanifestiterentries(object):
103 def __init__(self, lm):
103 def __init__(self, lm):
104 self.lm = lm
104 self.lm = lm
105 self.pos = 0
105 self.pos = 0
106
106
107 def __iter__(self):
107 def __iter__(self):
108 return self
108 return self
109
109
110 def next(self):
110 def next(self):
111 try:
111 try:
112 data, pos = self.lm._get(self.pos)
112 data, pos = self.lm._get(self.pos)
113 except IndexError:
113 except IndexError:
114 raise StopIteration
114 raise StopIteration
115 if pos == -1:
115 if pos == -1:
116 self.pos += 1
116 self.pos += 1
117 return data
117 return data
118 zeropos = data.find(b'\x00', pos)
118 zeropos = data.find(b'\x00', pos)
119 hashval = unhexlify(data, self.lm.extrainfo[self.pos], zeropos + 1, 40)
119 hashval = unhexlify(data, self.lm.extrainfo[self.pos], zeropos + 1, 40)
120 flags = self.lm._getflags(data, self.pos, zeropos)
120 flags = self.lm._getflags(data, self.pos, zeropos)
121 self.pos += 1
121 self.pos += 1
122 return (data[pos:zeropos], hashval, flags)
122 return (data[pos:zeropos], hashval, flags)
123
123
124 __next__ = next
124 __next__ = next
125
125
126
126
127 def unhexlify(data, extra, pos, length):
127 def unhexlify(data, extra, pos, length):
128 s = bin(data[pos : pos + length])
128 s = bin(data[pos : pos + length])
129 if extra:
129 if extra:
130 s += chr(extra & 0xFF)
130 s += chr(extra & 0xFF)
131 return s
131 return s
132
132
133
133
134 def _cmp(a, b):
134 def _cmp(a, b):
135 return (a > b) - (a < b)
135 return (a > b) - (a < b)
136
136
137
137
138 class _lazymanifest(object):
138 class _lazymanifest(object):
139 """A pure python manifest backed by a byte string. It is supplimented with
139 """A pure python manifest backed by a byte string. It is supplimented with
140 internal lists as it is modified, until it is compacted back to a pure byte
140 internal lists as it is modified, until it is compacted back to a pure byte
141 string.
141 string.
142
142
143 ``data`` is the initial manifest data.
143 ``data`` is the initial manifest data.
144
144
145 ``positions`` is a list of offsets, one per manifest entry. Positive
145 ``positions`` is a list of offsets, one per manifest entry. Positive
146 values are offsets into ``data``, negative values are offsets into the
146 values are offsets into ``data``, negative values are offsets into the
147 ``extradata`` list. When an entry is removed, its entry is dropped from
147 ``extradata`` list. When an entry is removed, its entry is dropped from
148 ``positions``. The values are encoded such that when walking the list and
148 ``positions``. The values are encoded such that when walking the list and
149 indexing into ``data`` or ``extradata`` as appropriate, the entries are
149 indexing into ``data`` or ``extradata`` as appropriate, the entries are
150 sorted by filename.
150 sorted by filename.
151
151
152 ``extradata`` is a list of (key, hash, flags) for entries that were added or
152 ``extradata`` is a list of (key, hash, flags) for entries that were added or
153 modified since the manifest was created or compacted.
153 modified since the manifest was created or compacted.
154 """
154 """
155
155
156 def __init__(
156 def __init__(
157 self,
157 self,
158 data,
158 data,
159 positions=None,
159 positions=None,
160 extrainfo=None,
160 extrainfo=None,
161 extradata=None,
161 extradata=None,
162 hasremovals=False,
162 hasremovals=False,
163 ):
163 ):
164 if positions is None:
164 if positions is None:
165 self.positions = self.findlines(data)
165 self.positions = self.findlines(data)
166 self.extrainfo = [0] * len(self.positions)
166 self.extrainfo = [0] * len(self.positions)
167 self.data = data
167 self.data = data
168 self.extradata = []
168 self.extradata = []
169 self.hasremovals = False
169 self.hasremovals = False
170 else:
170 else:
171 self.positions = positions[:]
171 self.positions = positions[:]
172 self.extrainfo = extrainfo[:]
172 self.extrainfo = extrainfo[:]
173 self.extradata = extradata[:]
173 self.extradata = extradata[:]
174 self.data = data
174 self.data = data
175 self.hasremovals = hasremovals
175 self.hasremovals = hasremovals
176
176
177 def findlines(self, data):
177 def findlines(self, data):
178 if not data:
178 if not data:
179 return []
179 return []
180 pos = data.find(b"\n")
180 pos = data.find(b"\n")
181 if pos == -1 or data[-1:] != b'\n':
181 if pos == -1 or data[-1:] != b'\n':
182 raise ValueError(b"Manifest did not end in a newline.")
182 raise ValueError(b"Manifest did not end in a newline.")
183 positions = [0]
183 positions = [0]
184 prev = data[: data.find(b'\x00')]
184 prev = data[: data.find(b'\x00')]
185 while pos < len(data) - 1 and pos != -1:
185 while pos < len(data) - 1 and pos != -1:
186 positions.append(pos + 1)
186 positions.append(pos + 1)
187 nexts = data[pos + 1 : data.find(b'\x00', pos + 1)]
187 nexts = data[pos + 1 : data.find(b'\x00', pos + 1)]
188 if nexts < prev:
188 if nexts < prev:
189 raise ValueError(b"Manifest lines not in sorted order.")
189 raise ValueError(b"Manifest lines not in sorted order.")
190 prev = nexts
190 prev = nexts
191 pos = data.find(b"\n", pos + 1)
191 pos = data.find(b"\n", pos + 1)
192 return positions
192 return positions
193
193
194 def _get(self, index):
194 def _get(self, index):
195 # get the position encoded in pos:
195 # get the position encoded in pos:
196 # positive number is an index in 'data'
196 # positive number is an index in 'data'
197 # negative number is in extrapieces
197 # negative number is in extrapieces
198 pos = self.positions[index]
198 pos = self.positions[index]
199 if pos >= 0:
199 if pos >= 0:
200 return self.data, pos
200 return self.data, pos
201 return self.extradata[-pos - 1], -1
201 return self.extradata[-pos - 1], -1
202
202
203 def _getkey(self, pos):
203 def _getkey(self, pos):
204 if pos >= 0:
204 if pos >= 0:
205 return self.data[pos : self.data.find(b'\x00', pos + 1)]
205 return self.data[pos : self.data.find(b'\x00', pos + 1)]
206 return self.extradata[-pos - 1][0]
206 return self.extradata[-pos - 1][0]
207
207
208 def bsearch(self, key):
208 def bsearch(self, key):
209 first = 0
209 first = 0
210 last = len(self.positions) - 1
210 last = len(self.positions) - 1
211
211
212 while first <= last:
212 while first <= last:
213 midpoint = (first + last) // 2
213 midpoint = (first + last) // 2
214 nextpos = self.positions[midpoint]
214 nextpos = self.positions[midpoint]
215 candidate = self._getkey(nextpos)
215 candidate = self._getkey(nextpos)
216 r = _cmp(key, candidate)
216 r = _cmp(key, candidate)
217 if r == 0:
217 if r == 0:
218 return midpoint
218 return midpoint
219 else:
219 else:
220 if r < 0:
220 if r < 0:
221 last = midpoint - 1
221 last = midpoint - 1
222 else:
222 else:
223 first = midpoint + 1
223 first = midpoint + 1
224 return -1
224 return -1
225
225
226 def bsearch2(self, key):
226 def bsearch2(self, key):
227 # same as the above, but will always return the position
227 # same as the above, but will always return the position
228 # done for performance reasons
228 # done for performance reasons
229 first = 0
229 first = 0
230 last = len(self.positions) - 1
230 last = len(self.positions) - 1
231
231
232 while first <= last:
232 while first <= last:
233 midpoint = (first + last) // 2
233 midpoint = (first + last) // 2
234 nextpos = self.positions[midpoint]
234 nextpos = self.positions[midpoint]
235 candidate = self._getkey(nextpos)
235 candidate = self._getkey(nextpos)
236 r = _cmp(key, candidate)
236 r = _cmp(key, candidate)
237 if r == 0:
237 if r == 0:
238 return (midpoint, True)
238 return (midpoint, True)
239 else:
239 else:
240 if r < 0:
240 if r < 0:
241 last = midpoint - 1
241 last = midpoint - 1
242 else:
242 else:
243 first = midpoint + 1
243 first = midpoint + 1
244 return (first, False)
244 return (first, False)
245
245
246 def __contains__(self, key):
246 def __contains__(self, key):
247 return self.bsearch(key) != -1
247 return self.bsearch(key) != -1
248
248
249 def _getflags(self, data, needle, pos):
249 def _getflags(self, data, needle, pos):
250 start = pos + 41
250 start = pos + 41
251 end = data.find(b"\n", start)
251 end = data.find(b"\n", start)
252 if end == -1:
252 if end == -1:
253 end = len(data) - 1
253 end = len(data) - 1
254 if start == end:
254 if start == end:
255 return b''
255 return b''
256 return self.data[start:end]
256 return self.data[start:end]
257
257
258 def __getitem__(self, key):
258 def __getitem__(self, key):
259 if not isinstance(key, bytes):
259 if not isinstance(key, bytes):
260 raise TypeError(b"getitem: manifest keys must be a bytes.")
260 raise TypeError(b"getitem: manifest keys must be a bytes.")
261 needle = self.bsearch(key)
261 needle = self.bsearch(key)
262 if needle == -1:
262 if needle == -1:
263 raise KeyError
263 raise KeyError
264 data, pos = self._get(needle)
264 data, pos = self._get(needle)
265 if pos == -1:
265 if pos == -1:
266 return (data[1], data[2])
266 return (data[1], data[2])
267 zeropos = data.find(b'\x00', pos)
267 zeropos = data.find(b'\x00', pos)
268 assert 0 <= needle <= len(self.positions)
268 assert 0 <= needle <= len(self.positions)
269 assert len(self.extrainfo) == len(self.positions)
269 assert len(self.extrainfo) == len(self.positions)
270 hashval = unhexlify(data, self.extrainfo[needle], zeropos + 1, 40)
270 hashval = unhexlify(data, self.extrainfo[needle], zeropos + 1, 40)
271 flags = self._getflags(data, needle, zeropos)
271 flags = self._getflags(data, needle, zeropos)
272 return (hashval, flags)
272 return (hashval, flags)
273
273
274 def __delitem__(self, key):
274 def __delitem__(self, key):
275 needle, found = self.bsearch2(key)
275 needle, found = self.bsearch2(key)
276 if not found:
276 if not found:
277 raise KeyError
277 raise KeyError
278 cur = self.positions[needle]
278 cur = self.positions[needle]
279 self.positions = self.positions[:needle] + self.positions[needle + 1 :]
279 self.positions = self.positions[:needle] + self.positions[needle + 1 :]
280 self.extrainfo = self.extrainfo[:needle] + self.extrainfo[needle + 1 :]
280 self.extrainfo = self.extrainfo[:needle] + self.extrainfo[needle + 1 :]
281 if cur >= 0:
281 if cur >= 0:
282 # This does NOT unsort the list as far as the search functions are
282 # This does NOT unsort the list as far as the search functions are
283 # concerned, as they only examine lines mapped by self.positions.
283 # concerned, as they only examine lines mapped by self.positions.
284 self.data = self.data[:cur] + b'\x00' + self.data[cur + 1 :]
284 self.data = self.data[:cur] + b'\x00' + self.data[cur + 1 :]
285 self.hasremovals = True
285 self.hasremovals = True
286
286
287 def __setitem__(self, key, value):
287 def __setitem__(self, key, value):
288 if not isinstance(key, bytes):
288 if not isinstance(key, bytes):
289 raise TypeError(b"setitem: manifest keys must be a byte string.")
289 raise TypeError(b"setitem: manifest keys must be a byte string.")
290 if not isinstance(value, tuple) or len(value) != 2:
290 if not isinstance(value, tuple) or len(value) != 2:
291 raise TypeError(
291 raise TypeError(
292 b"Manifest values must be a tuple of (node, flags)."
292 b"Manifest values must be a tuple of (node, flags)."
293 )
293 )
294 hashval = value[0]
294 hashval = value[0]
295 if not isinstance(hashval, bytes) or not 20 <= len(hashval) <= 22:
295 if not isinstance(hashval, bytes) or not 20 <= len(hashval) <= 22:
296 raise TypeError(b"node must be a 20-byte byte string")
296 raise TypeError(b"node must be a 20-byte byte string")
297 flags = value[1]
297 flags = value[1]
298 if len(hashval) == 22:
298 if len(hashval) == 22:
299 hashval = hashval[:-1]
299 hashval = hashval[:-1]
300 if not isinstance(flags, bytes) or len(flags) > 1:
300 if not isinstance(flags, bytes) or len(flags) > 1:
301 raise TypeError(b"flags must a 0 or 1 byte string, got %r", flags)
301 raise TypeError(b"flags must a 0 or 1 byte string, got %r", flags)
302 needle, found = self.bsearch2(key)
302 needle, found = self.bsearch2(key)
303 if found:
303 if found:
304 # put the item
304 # put the item
305 pos = self.positions[needle]
305 pos = self.positions[needle]
306 if pos < 0:
306 if pos < 0:
307 self.extradata[-pos - 1] = (key, hashval, value[1])
307 self.extradata[-pos - 1] = (key, hashval, value[1])
308 else:
308 else:
309 # just don't bother
309 # just don't bother
310 self.extradata.append((key, hashval, value[1]))
310 self.extradata.append((key, hashval, value[1]))
311 self.positions[needle] = -len(self.extradata)
311 self.positions[needle] = -len(self.extradata)
312 else:
312 else:
313 # not found, put it in with extra positions
313 # not found, put it in with extra positions
314 self.extradata.append((key, hashval, value[1]))
314 self.extradata.append((key, hashval, value[1]))
315 self.positions = (
315 self.positions = (
316 self.positions[:needle]
316 self.positions[:needle]
317 + [-len(self.extradata)]
317 + [-len(self.extradata)]
318 + self.positions[needle:]
318 + self.positions[needle:]
319 )
319 )
320 self.extrainfo = (
320 self.extrainfo = (
321 self.extrainfo[:needle] + [0] + self.extrainfo[needle:]
321 self.extrainfo[:needle] + [0] + self.extrainfo[needle:]
322 )
322 )
323
323
324 def copy(self):
324 def copy(self):
325 # XXX call _compact like in C?
325 # XXX call _compact like in C?
326 return _lazymanifest(
326 return _lazymanifest(
327 self.data,
327 self.data,
328 self.positions,
328 self.positions,
329 self.extrainfo,
329 self.extrainfo,
330 self.extradata,
330 self.extradata,
331 self.hasremovals,
331 self.hasremovals,
332 )
332 )
333
333
334 def _compact(self):
334 def _compact(self):
335 # hopefully not called TOO often
335 # hopefully not called TOO often
336 if len(self.extradata) == 0 and not self.hasremovals:
336 if len(self.extradata) == 0 and not self.hasremovals:
337 return
337 return
338 l = []
338 l = []
339 i = 0
339 i = 0
340 offset = 0
340 offset = 0
341 self.extrainfo = [0] * len(self.positions)
341 self.extrainfo = [0] * len(self.positions)
342 while i < len(self.positions):
342 while i < len(self.positions):
343 if self.positions[i] >= 0:
343 if self.positions[i] >= 0:
344 cur = self.positions[i]
344 cur = self.positions[i]
345 last_cut = cur
345 last_cut = cur
346
346
347 # Collect all contiguous entries in the buffer at the current
347 # Collect all contiguous entries in the buffer at the current
348 # offset, breaking out only for added/modified items held in
348 # offset, breaking out only for added/modified items held in
349 # extradata, or a deleted line prior to the next position.
349 # extradata, or a deleted line prior to the next position.
350 while True:
350 while True:
351 self.positions[i] = offset
351 self.positions[i] = offset
352 i += 1
352 i += 1
353 if i == len(self.positions) or self.positions[i] < 0:
353 if i == len(self.positions) or self.positions[i] < 0:
354 break
354 break
355
355
356 # A removed file has no positions[] entry, but does have an
356 # A removed file has no positions[] entry, but does have an
357 # overwritten first byte. Break out and find the end of the
357 # overwritten first byte. Break out and find the end of the
358 # current good entry/entries if there is a removed file
358 # current good entry/entries if there is a removed file
359 # before the next position.
359 # before the next position.
360 if (
360 if (
361 self.hasremovals
361 self.hasremovals
362 and self.data.find(b'\n\x00', cur, self.positions[i])
362 and self.data.find(b'\n\x00', cur, self.positions[i])
363 != -1
363 != -1
364 ):
364 ):
365 break
365 break
366
366
367 offset += self.positions[i] - cur
367 offset += self.positions[i] - cur
368 cur = self.positions[i]
368 cur = self.positions[i]
369 end_cut = self.data.find(b'\n', cur)
369 end_cut = self.data.find(b'\n', cur)
370 if end_cut != -1:
370 if end_cut != -1:
371 end_cut += 1
371 end_cut += 1
372 offset += end_cut - cur
372 offset += end_cut - cur
373 l.append(self.data[last_cut:end_cut])
373 l.append(self.data[last_cut:end_cut])
374 else:
374 else:
375 while i < len(self.positions) and self.positions[i] < 0:
375 while i < len(self.positions) and self.positions[i] < 0:
376 cur = self.positions[i]
376 cur = self.positions[i]
377 t = self.extradata[-cur - 1]
377 t = self.extradata[-cur - 1]
378 l.append(self._pack(t))
378 l.append(self._pack(t))
379 self.positions[i] = offset
379 self.positions[i] = offset
380 if len(t[1]) > 20:
380 if len(t[1]) > 20:
381 self.extrainfo[i] = ord(t[1][21])
381 self.extrainfo[i] = ord(t[1][21])
382 offset += len(l[-1])
382 offset += len(l[-1])
383 i += 1
383 i += 1
384 self.data = b''.join(l)
384 self.data = b''.join(l)
385 self.hasremovals = False
385 self.hasremovals = False
386 self.extradata = []
386 self.extradata = []
387
387
388 def _pack(self, d):
388 def _pack(self, d):
389 return d[0] + b'\x00' + hex(d[1][:20]) + d[2] + b'\n'
389 return d[0] + b'\x00' + hex(d[1][:20]) + d[2] + b'\n'
390
390
391 def text(self):
391 def text(self):
392 self._compact()
392 self._compact()
393 return self.data
393 return self.data
394
394
395 def diff(self, m2, clean=False):
395 def diff(self, m2, clean=False):
396 '''Finds changes between the current manifest and m2.'''
396 '''Finds changes between the current manifest and m2.'''
397 # XXX think whether efficiency matters here
397 # XXX think whether efficiency matters here
398 diff = {}
398 diff = {}
399
399
400 for fn, e1, flags in self.iterentries():
400 for fn, e1, flags in self.iterentries():
401 if fn not in m2:
401 if fn not in m2:
402 diff[fn] = (e1, flags), (None, b'')
402 diff[fn] = (e1, flags), (None, b'')
403 else:
403 else:
404 e2 = m2[fn]
404 e2 = m2[fn]
405 if (e1, flags) != e2:
405 if (e1, flags) != e2:
406 diff[fn] = (e1, flags), e2
406 diff[fn] = (e1, flags), e2
407 elif clean:
407 elif clean:
408 diff[fn] = None
408 diff[fn] = None
409
409
410 for fn, e2, flags in m2.iterentries():
410 for fn, e2, flags in m2.iterentries():
411 if fn not in self:
411 if fn not in self:
412 diff[fn] = (None, b''), (e2, flags)
412 diff[fn] = (None, b''), (e2, flags)
413
413
414 return diff
414 return diff
415
415
416 def iterentries(self):
416 def iterentries(self):
417 return lazymanifestiterentries(self)
417 return lazymanifestiterentries(self)
418
418
419 def iterkeys(self):
419 def iterkeys(self):
420 return lazymanifestiter(self)
420 return lazymanifestiter(self)
421
421
422 def __iter__(self):
422 def __iter__(self):
423 return lazymanifestiter(self)
423 return lazymanifestiter(self)
424
424
425 def __len__(self):
425 def __len__(self):
426 return len(self.positions)
426 return len(self.positions)
427
427
428 def filtercopy(self, filterfn):
428 def filtercopy(self, filterfn):
429 # XXX should be optimized
429 # XXX should be optimized
430 c = _lazymanifest(b'')
430 c = _lazymanifest(b'')
431 for f, n, fl in self.iterentries():
431 for f, n, fl in self.iterentries():
432 if filterfn(f):
432 if filterfn(f):
433 c[f] = n, fl
433 c[f] = n, fl
434 return c
434 return c
435
435
436
436
437 try:
437 try:
438 _lazymanifest = parsers.lazymanifest
438 _lazymanifest = parsers.lazymanifest
439 except AttributeError:
439 except AttributeError:
440 pass
440 pass
441
441
442
442
443 @interfaceutil.implementer(repository.imanifestdict)
443 @interfaceutil.implementer(repository.imanifestdict)
444 class manifestdict(object):
444 class manifestdict(object):
445 def __init__(self, data=b''):
445 def __init__(self, data=b''):
446 self._lm = _lazymanifest(data)
446 self._lm = _lazymanifest(data)
447
447
448 def __getitem__(self, key):
448 def __getitem__(self, key):
449 return self._lm[key][0]
449 return self._lm[key][0]
450
450
451 def find(self, key):
451 def find(self, key):
452 return self._lm[key]
452 return self._lm[key]
453
453
454 def __len__(self):
454 def __len__(self):
455 return len(self._lm)
455 return len(self._lm)
456
456
457 def __nonzero__(self):
457 def __nonzero__(self):
458 # nonzero is covered by the __len__ function, but implementing it here
458 # nonzero is covered by the __len__ function, but implementing it here
459 # makes it easier for extensions to override.
459 # makes it easier for extensions to override.
460 return len(self._lm) != 0
460 return len(self._lm) != 0
461
461
462 __bool__ = __nonzero__
462 __bool__ = __nonzero__
463
463
464 def __setitem__(self, key, node):
464 def __setitem__(self, key, node):
465 self._lm[key] = node, self.flags(key)
465 self._lm[key] = node, self.flags(key)
466
466
467 def __contains__(self, key):
467 def __contains__(self, key):
468 if key is None:
468 if key is None:
469 return False
469 return False
470 return key in self._lm
470 return key in self._lm
471
471
472 def __delitem__(self, key):
472 def __delitem__(self, key):
473 del self._lm[key]
473 del self._lm[key]
474
474
475 def __iter__(self):
475 def __iter__(self):
476 return self._lm.__iter__()
476 return self._lm.__iter__()
477
477
478 def iterkeys(self):
478 def iterkeys(self):
479 return self._lm.iterkeys()
479 return self._lm.iterkeys()
480
480
481 def keys(self):
481 def keys(self):
482 return list(self.iterkeys())
482 return list(self.iterkeys())
483
483
484 def filesnotin(self, m2, match=None):
484 def filesnotin(self, m2, match=None):
485 '''Set of files in this manifest that are not in the other'''
485 '''Set of files in this manifest that are not in the other'''
486 if match is not None:
486 if match is not None:
487 match = matchmod.badmatch(match, lambda path, msg: None)
487 match = matchmod.badmatch(match, lambda path, msg: None)
488 sm2 = set(m2.walk(match))
488 sm2 = set(m2.walk(match))
489 return {f for f in self.walk(match) if f not in sm2}
489 return {f for f in self.walk(match) if f not in sm2}
490 return {f for f in self if f not in m2}
490 return {f for f in self if f not in m2}
491
491
492 @propertycache
492 @propertycache
493 def _dirs(self):
493 def _dirs(self):
494 return pathutil.dirs(self)
494 return pathutil.dirs(self)
495
495
496 def dirs(self):
496 def dirs(self):
497 return self._dirs
497 return self._dirs
498
498
499 def hasdir(self, dir):
499 def hasdir(self, dir):
500 return dir in self._dirs
500 return dir in self._dirs
501
501
502 def _filesfastpath(self, match):
502 def _filesfastpath(self, match):
503 '''Checks whether we can correctly and quickly iterate over matcher
503 '''Checks whether we can correctly and quickly iterate over matcher
504 files instead of over manifest files.'''
504 files instead of over manifest files.'''
505 files = match.files()
505 files = match.files()
506 return len(files) < 100 and (
506 return len(files) < 100 and (
507 match.isexact()
507 match.isexact()
508 or (match.prefix() and all(fn in self for fn in files))
508 or (match.prefix() and all(fn in self for fn in files))
509 )
509 )
510
510
511 def walk(self, match):
511 def walk(self, match):
512 '''Generates matching file names.
512 '''Generates matching file names.
513
513
514 Equivalent to manifest.matches(match).iterkeys(), but without creating
514 Equivalent to manifest.matches(match).iterkeys(), but without creating
515 an entirely new manifest.
515 an entirely new manifest.
516
516
517 It also reports nonexistent files by marking them bad with match.bad().
517 It also reports nonexistent files by marking them bad with match.bad().
518 '''
518 '''
519 if match.always():
519 if match.always():
520 for f in iter(self):
520 for f in iter(self):
521 yield f
521 yield f
522 return
522 return
523
523
524 fset = set(match.files())
524 fset = set(match.files())
525
525
526 # avoid the entire walk if we're only looking for specific files
526 # avoid the entire walk if we're only looking for specific files
527 if self._filesfastpath(match):
527 if self._filesfastpath(match):
528 for fn in sorted(fset):
528 for fn in sorted(fset):
529 if fn in self:
529 if fn in self:
530 yield fn
530 yield fn
531 return
531 return
532
532
533 for fn in self:
533 for fn in self:
534 if fn in fset:
534 if fn in fset:
535 # specified pattern is the exact name
535 # specified pattern is the exact name
536 fset.remove(fn)
536 fset.remove(fn)
537 if match(fn):
537 if match(fn):
538 yield fn
538 yield fn
539
539
540 # for dirstate.walk, files=[''] means "walk the whole tree".
540 # for dirstate.walk, files=[''] means "walk the whole tree".
541 # follow that here, too
541 # follow that here, too
542 fset.discard(b'')
542 fset.discard(b'')
543
543
544 for fn in sorted(fset):
544 for fn in sorted(fset):
545 if not self.hasdir(fn):
545 if not self.hasdir(fn):
546 match.bad(fn, None)
546 match.bad(fn, None)
547
547
548 def matches(self, match):
548 def _matches(self, match):
549 '''generate a new manifest filtered by the match argument'''
549 '''generate a new manifest filtered by the match argument'''
550 if match.always():
550 if match.always():
551 return self.copy()
551 return self.copy()
552
552
553 if self._filesfastpath(match):
553 if self._filesfastpath(match):
554 m = manifestdict()
554 m = manifestdict()
555 lm = self._lm
555 lm = self._lm
556 for fn in match.files():
556 for fn in match.files():
557 if fn in lm:
557 if fn in lm:
558 m._lm[fn] = lm[fn]
558 m._lm[fn] = lm[fn]
559 return m
559 return m
560
560
561 m = manifestdict()
561 m = manifestdict()
562 m._lm = self._lm.filtercopy(match)
562 m._lm = self._lm.filtercopy(match)
563 return m
563 return m
564
564
565 def diff(self, m2, match=None, clean=False):
565 def diff(self, m2, match=None, clean=False):
566 '''Finds changes between the current manifest and m2.
566 '''Finds changes between the current manifest and m2.
567
567
568 Args:
568 Args:
569 m2: the manifest to which this manifest should be compared.
569 m2: the manifest to which this manifest should be compared.
570 clean: if true, include files unchanged between these manifests
570 clean: if true, include files unchanged between these manifests
571 with a None value in the returned dictionary.
571 with a None value in the returned dictionary.
572
572
573 The result is returned as a dict with filename as key and
573 The result is returned as a dict with filename as key and
574 values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
574 values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
575 nodeid in the current/other manifest and fl1/fl2 is the flag
575 nodeid in the current/other manifest and fl1/fl2 is the flag
576 in the current/other manifest. Where the file does not exist,
576 in the current/other manifest. Where the file does not exist,
577 the nodeid will be None and the flags will be the empty
577 the nodeid will be None and the flags will be the empty
578 string.
578 string.
579 '''
579 '''
580 if match:
580 if match:
581 m1 = self.matches(match)
581 m1 = self._matches(match)
582 m2 = m2.matches(match)
582 m2 = m2._matches(match)
583 return m1.diff(m2, clean=clean)
583 return m1.diff(m2, clean=clean)
584 return self._lm.diff(m2._lm, clean)
584 return self._lm.diff(m2._lm, clean)
585
585
586 def setflag(self, key, flag):
586 def setflag(self, key, flag):
587 self._lm[key] = self[key], flag
587 self._lm[key] = self[key], flag
588
588
589 def get(self, key, default=None):
589 def get(self, key, default=None):
590 try:
590 try:
591 return self._lm[key][0]
591 return self._lm[key][0]
592 except KeyError:
592 except KeyError:
593 return default
593 return default
594
594
595 def flags(self, key):
595 def flags(self, key):
596 try:
596 try:
597 return self._lm[key][1]
597 return self._lm[key][1]
598 except KeyError:
598 except KeyError:
599 return b''
599 return b''
600
600
601 def copy(self):
601 def copy(self):
602 c = manifestdict()
602 c = manifestdict()
603 c._lm = self._lm.copy()
603 c._lm = self._lm.copy()
604 return c
604 return c
605
605
606 def items(self):
606 def items(self):
607 return (x[:2] for x in self._lm.iterentries())
607 return (x[:2] for x in self._lm.iterentries())
608
608
609 def iteritems(self):
609 def iteritems(self):
610 return (x[:2] for x in self._lm.iterentries())
610 return (x[:2] for x in self._lm.iterentries())
611
611
612 def iterentries(self):
612 def iterentries(self):
613 return self._lm.iterentries()
613 return self._lm.iterentries()
614
614
615 def text(self):
615 def text(self):
616 # most likely uses native version
616 # most likely uses native version
617 return self._lm.text()
617 return self._lm.text()
618
618
619 def fastdelta(self, base, changes):
619 def fastdelta(self, base, changes):
620 """Given a base manifest text as a bytearray and a list of changes
620 """Given a base manifest text as a bytearray and a list of changes
621 relative to that text, compute a delta that can be used by revlog.
621 relative to that text, compute a delta that can be used by revlog.
622 """
622 """
623 delta = []
623 delta = []
624 dstart = None
624 dstart = None
625 dend = None
625 dend = None
626 dline = [b""]
626 dline = [b""]
627 start = 0
627 start = 0
628 # zero copy representation of base as a buffer
628 # zero copy representation of base as a buffer
629 addbuf = util.buffer(base)
629 addbuf = util.buffer(base)
630
630
631 changes = list(changes)
631 changes = list(changes)
632 if len(changes) < FASTDELTA_TEXTDIFF_THRESHOLD:
632 if len(changes) < FASTDELTA_TEXTDIFF_THRESHOLD:
633 # start with a readonly loop that finds the offset of
633 # start with a readonly loop that finds the offset of
634 # each line and creates the deltas
634 # each line and creates the deltas
635 for f, todelete in changes:
635 for f, todelete in changes:
636 # bs will either be the index of the item or the insert point
636 # bs will either be the index of the item or the insert point
637 start, end = _msearch(addbuf, f, start)
637 start, end = _msearch(addbuf, f, start)
638 if not todelete:
638 if not todelete:
639 h, fl = self._lm[f]
639 h, fl = self._lm[f]
640 l = b"%s\0%s%s\n" % (f, hex(h), fl)
640 l = b"%s\0%s%s\n" % (f, hex(h), fl)
641 else:
641 else:
642 if start == end:
642 if start == end:
643 # item we want to delete was not found, error out
643 # item we want to delete was not found, error out
644 raise AssertionError(
644 raise AssertionError(
645 _(b"failed to remove %s from manifest") % f
645 _(b"failed to remove %s from manifest") % f
646 )
646 )
647 l = b""
647 l = b""
648 if dstart is not None and dstart <= start and dend >= start:
648 if dstart is not None and dstart <= start and dend >= start:
649 if dend < end:
649 if dend < end:
650 dend = end
650 dend = end
651 if l:
651 if l:
652 dline.append(l)
652 dline.append(l)
653 else:
653 else:
654 if dstart is not None:
654 if dstart is not None:
655 delta.append([dstart, dend, b"".join(dline)])
655 delta.append([dstart, dend, b"".join(dline)])
656 dstart = start
656 dstart = start
657 dend = end
657 dend = end
658 dline = [l]
658 dline = [l]
659
659
660 if dstart is not None:
660 if dstart is not None:
661 delta.append([dstart, dend, b"".join(dline)])
661 delta.append([dstart, dend, b"".join(dline)])
662 # apply the delta to the base, and get a delta for addrevision
662 # apply the delta to the base, and get a delta for addrevision
663 deltatext, arraytext = _addlistdelta(base, delta)
663 deltatext, arraytext = _addlistdelta(base, delta)
664 else:
664 else:
665 # For large changes, it's much cheaper to just build the text and
665 # For large changes, it's much cheaper to just build the text and
666 # diff it.
666 # diff it.
667 arraytext = bytearray(self.text())
667 arraytext = bytearray(self.text())
668 deltatext = mdiff.textdiff(
668 deltatext = mdiff.textdiff(
669 util.buffer(base), util.buffer(arraytext)
669 util.buffer(base), util.buffer(arraytext)
670 )
670 )
671
671
672 return arraytext, deltatext
672 return arraytext, deltatext
673
673
674
674
675 def _msearch(m, s, lo=0, hi=None):
675 def _msearch(m, s, lo=0, hi=None):
676 '''return a tuple (start, end) that says where to find s within m.
676 '''return a tuple (start, end) that says where to find s within m.
677
677
678 If the string is found m[start:end] are the line containing
678 If the string is found m[start:end] are the line containing
679 that string. If start == end the string was not found and
679 that string. If start == end the string was not found and
680 they indicate the proper sorted insertion point.
680 they indicate the proper sorted insertion point.
681
681
682 m should be a buffer, a memoryview or a byte string.
682 m should be a buffer, a memoryview or a byte string.
683 s is a byte string'''
683 s is a byte string'''
684
684
685 def advance(i, c):
685 def advance(i, c):
686 while i < lenm and m[i : i + 1] != c:
686 while i < lenm and m[i : i + 1] != c:
687 i += 1
687 i += 1
688 return i
688 return i
689
689
690 if not s:
690 if not s:
691 return (lo, lo)
691 return (lo, lo)
692 lenm = len(m)
692 lenm = len(m)
693 if not hi:
693 if not hi:
694 hi = lenm
694 hi = lenm
695 while lo < hi:
695 while lo < hi:
696 mid = (lo + hi) // 2
696 mid = (lo + hi) // 2
697 start = mid
697 start = mid
698 while start > 0 and m[start - 1 : start] != b'\n':
698 while start > 0 and m[start - 1 : start] != b'\n':
699 start -= 1
699 start -= 1
700 end = advance(start, b'\0')
700 end = advance(start, b'\0')
701 if bytes(m[start:end]) < s:
701 if bytes(m[start:end]) < s:
702 # we know that after the null there are 40 bytes of sha1
702 # we know that after the null there are 40 bytes of sha1
703 # this translates to the bisect lo = mid + 1
703 # this translates to the bisect lo = mid + 1
704 lo = advance(end + 40, b'\n') + 1
704 lo = advance(end + 40, b'\n') + 1
705 else:
705 else:
706 # this translates to the bisect hi = mid
706 # this translates to the bisect hi = mid
707 hi = start
707 hi = start
708 end = advance(lo, b'\0')
708 end = advance(lo, b'\0')
709 found = m[lo:end]
709 found = m[lo:end]
710 if s == found:
710 if s == found:
711 # we know that after the null there are 40 bytes of sha1
711 # we know that after the null there are 40 bytes of sha1
712 end = advance(end + 40, b'\n')
712 end = advance(end + 40, b'\n')
713 return (lo, end + 1)
713 return (lo, end + 1)
714 else:
714 else:
715 return (lo, lo)
715 return (lo, lo)
716
716
717
717
718 def _checkforbidden(l):
718 def _checkforbidden(l):
719 """Check filenames for illegal characters."""
719 """Check filenames for illegal characters."""
720 for f in l:
720 for f in l:
721 if b'\n' in f or b'\r' in f:
721 if b'\n' in f or b'\r' in f:
722 raise error.StorageError(
722 raise error.StorageError(
723 _(b"'\\n' and '\\r' disallowed in filenames: %r")
723 _(b"'\\n' and '\\r' disallowed in filenames: %r")
724 % pycompat.bytestr(f)
724 % pycompat.bytestr(f)
725 )
725 )
726
726
727
727
728 # apply the changes collected during the bisect loop to our addlist
728 # apply the changes collected during the bisect loop to our addlist
729 # return a delta suitable for addrevision
729 # return a delta suitable for addrevision
730 def _addlistdelta(addlist, x):
730 def _addlistdelta(addlist, x):
731 # for large addlist arrays, building a new array is cheaper
731 # for large addlist arrays, building a new array is cheaper
732 # than repeatedly modifying the existing one
732 # than repeatedly modifying the existing one
733 currentposition = 0
733 currentposition = 0
734 newaddlist = bytearray()
734 newaddlist = bytearray()
735
735
736 for start, end, content in x:
736 for start, end, content in x:
737 newaddlist += addlist[currentposition:start]
737 newaddlist += addlist[currentposition:start]
738 if content:
738 if content:
739 newaddlist += bytearray(content)
739 newaddlist += bytearray(content)
740
740
741 currentposition = end
741 currentposition = end
742
742
743 newaddlist += addlist[currentposition:]
743 newaddlist += addlist[currentposition:]
744
744
745 deltatext = b"".join(
745 deltatext = b"".join(
746 struct.pack(b">lll", start, end, len(content)) + content
746 struct.pack(b">lll", start, end, len(content)) + content
747 for start, end, content in x
747 for start, end, content in x
748 )
748 )
749 return deltatext, newaddlist
749 return deltatext, newaddlist
750
750
751
751
752 def _splittopdir(f):
752 def _splittopdir(f):
753 if b'/' in f:
753 if b'/' in f:
754 dir, subpath = f.split(b'/', 1)
754 dir, subpath = f.split(b'/', 1)
755 return dir + b'/', subpath
755 return dir + b'/', subpath
756 else:
756 else:
757 return b'', f
757 return b'', f
758
758
759
759
760 _noop = lambda s: None
760 _noop = lambda s: None
761
761
762
762
763 class treemanifest(object):
763 class treemanifest(object):
764 def __init__(self, dir=b'', text=b''):
764 def __init__(self, dir=b'', text=b''):
765 self._dir = dir
765 self._dir = dir
766 self._node = nullid
766 self._node = nullid
767 self._loadfunc = _noop
767 self._loadfunc = _noop
768 self._copyfunc = _noop
768 self._copyfunc = _noop
769 self._dirty = False
769 self._dirty = False
770 self._dirs = {}
770 self._dirs = {}
771 self._lazydirs = {}
771 self._lazydirs = {}
772 # Using _lazymanifest here is a little slower than plain old dicts
772 # Using _lazymanifest here is a little slower than plain old dicts
773 self._files = {}
773 self._files = {}
774 self._flags = {}
774 self._flags = {}
775 if text:
775 if text:
776
776
777 def readsubtree(subdir, subm):
777 def readsubtree(subdir, subm):
778 raise AssertionError(
778 raise AssertionError(
779 b'treemanifest constructor only accepts flat manifests'
779 b'treemanifest constructor only accepts flat manifests'
780 )
780 )
781
781
782 self.parse(text, readsubtree)
782 self.parse(text, readsubtree)
783 self._dirty = True # Mark flat manifest dirty after parsing
783 self._dirty = True # Mark flat manifest dirty after parsing
784
784
785 def _subpath(self, path):
785 def _subpath(self, path):
786 return self._dir + path
786 return self._dir + path
787
787
788 def _loadalllazy(self):
788 def _loadalllazy(self):
789 selfdirs = self._dirs
789 selfdirs = self._dirs
790 for d, (path, node, readsubtree, docopy) in pycompat.iteritems(
790 for d, (path, node, readsubtree, docopy) in pycompat.iteritems(
791 self._lazydirs
791 self._lazydirs
792 ):
792 ):
793 if docopy:
793 if docopy:
794 selfdirs[d] = readsubtree(path, node).copy()
794 selfdirs[d] = readsubtree(path, node).copy()
795 else:
795 else:
796 selfdirs[d] = readsubtree(path, node)
796 selfdirs[d] = readsubtree(path, node)
797 self._lazydirs = {}
797 self._lazydirs = {}
798
798
799 def _loadlazy(self, d):
799 def _loadlazy(self, d):
800 v = self._lazydirs.get(d)
800 v = self._lazydirs.get(d)
801 if v:
801 if v:
802 path, node, readsubtree, docopy = v
802 path, node, readsubtree, docopy = v
803 if docopy:
803 if docopy:
804 self._dirs[d] = readsubtree(path, node).copy()
804 self._dirs[d] = readsubtree(path, node).copy()
805 else:
805 else:
806 self._dirs[d] = readsubtree(path, node)
806 self._dirs[d] = readsubtree(path, node)
807 del self._lazydirs[d]
807 del self._lazydirs[d]
808
808
809 def _loadchildrensetlazy(self, visit):
809 def _loadchildrensetlazy(self, visit):
810 if not visit:
810 if not visit:
811 return None
811 return None
812 if visit == b'all' or visit == b'this':
812 if visit == b'all' or visit == b'this':
813 self._loadalllazy()
813 self._loadalllazy()
814 return None
814 return None
815
815
816 loadlazy = self._loadlazy
816 loadlazy = self._loadlazy
817 for k in visit:
817 for k in visit:
818 loadlazy(k + b'/')
818 loadlazy(k + b'/')
819 return visit
819 return visit
820
820
821 def _loaddifflazy(self, t1, t2):
821 def _loaddifflazy(self, t1, t2):
822 """load items in t1 and t2 if they're needed for diffing.
822 """load items in t1 and t2 if they're needed for diffing.
823
823
824 The criteria currently is:
824 The criteria currently is:
825 - if it's not present in _lazydirs in either t1 or t2, load it in the
825 - if it's not present in _lazydirs in either t1 or t2, load it in the
826 other (it may already be loaded or it may not exist, doesn't matter)
826 other (it may already be loaded or it may not exist, doesn't matter)
827 - if it's present in _lazydirs in both, compare the nodeid; if it
827 - if it's present in _lazydirs in both, compare the nodeid; if it
828 differs, load it in both
828 differs, load it in both
829 """
829 """
830 toloadlazy = []
830 toloadlazy = []
831 for d, v1 in pycompat.iteritems(t1._lazydirs):
831 for d, v1 in pycompat.iteritems(t1._lazydirs):
832 v2 = t2._lazydirs.get(d)
832 v2 = t2._lazydirs.get(d)
833 if not v2 or v2[1] != v1[1]:
833 if not v2 or v2[1] != v1[1]:
834 toloadlazy.append(d)
834 toloadlazy.append(d)
835 for d, v1 in pycompat.iteritems(t2._lazydirs):
835 for d, v1 in pycompat.iteritems(t2._lazydirs):
836 if d not in t1._lazydirs:
836 if d not in t1._lazydirs:
837 toloadlazy.append(d)
837 toloadlazy.append(d)
838
838
839 for d in toloadlazy:
839 for d in toloadlazy:
840 t1._loadlazy(d)
840 t1._loadlazy(d)
841 t2._loadlazy(d)
841 t2._loadlazy(d)
842
842
843 def __len__(self):
843 def __len__(self):
844 self._load()
844 self._load()
845 size = len(self._files)
845 size = len(self._files)
846 self._loadalllazy()
846 self._loadalllazy()
847 for m in self._dirs.values():
847 for m in self._dirs.values():
848 size += m.__len__()
848 size += m.__len__()
849 return size
849 return size
850
850
851 def __nonzero__(self):
851 def __nonzero__(self):
852 # Faster than "__len() != 0" since it avoids loading sub-manifests
852 # Faster than "__len() != 0" since it avoids loading sub-manifests
853 return not self._isempty()
853 return not self._isempty()
854
854
855 __bool__ = __nonzero__
855 __bool__ = __nonzero__
856
856
857 def _isempty(self):
857 def _isempty(self):
858 self._load() # for consistency; already loaded by all callers
858 self._load() # for consistency; already loaded by all callers
859 # See if we can skip loading everything.
859 # See if we can skip loading everything.
860 if self._files or (
860 if self._files or (
861 self._dirs and any(not m._isempty() for m in self._dirs.values())
861 self._dirs and any(not m._isempty() for m in self._dirs.values())
862 ):
862 ):
863 return False
863 return False
864 self._loadalllazy()
864 self._loadalllazy()
865 return not self._dirs or all(m._isempty() for m in self._dirs.values())
865 return not self._dirs or all(m._isempty() for m in self._dirs.values())
866
866
867 @encoding.strmethod
867 @encoding.strmethod
868 def __repr__(self):
868 def __repr__(self):
869 return (
869 return (
870 b'<treemanifest dir=%s, node=%s, loaded=%r, dirty=%r at 0x%x>'
870 b'<treemanifest dir=%s, node=%s, loaded=%r, dirty=%r at 0x%x>'
871 % (
871 % (
872 self._dir,
872 self._dir,
873 hex(self._node),
873 hex(self._node),
874 bool(self._loadfunc is _noop),
874 bool(self._loadfunc is _noop),
875 self._dirty,
875 self._dirty,
876 id(self),
876 id(self),
877 )
877 )
878 )
878 )
879
879
880 def dir(self):
880 def dir(self):
881 '''The directory that this tree manifest represents, including a
881 '''The directory that this tree manifest represents, including a
882 trailing '/'. Empty string for the repo root directory.'''
882 trailing '/'. Empty string for the repo root directory.'''
883 return self._dir
883 return self._dir
884
884
885 def node(self):
885 def node(self):
886 '''This node of this instance. nullid for unsaved instances. Should
886 '''This node of this instance. nullid for unsaved instances. Should
887 be updated when the instance is read or written from a revlog.
887 be updated when the instance is read or written from a revlog.
888 '''
888 '''
889 assert not self._dirty
889 assert not self._dirty
890 return self._node
890 return self._node
891
891
892 def setnode(self, node):
892 def setnode(self, node):
893 self._node = node
893 self._node = node
894 self._dirty = False
894 self._dirty = False
895
895
896 def iterentries(self):
896 def iterentries(self):
897 self._load()
897 self._load()
898 self._loadalllazy()
898 self._loadalllazy()
899 for p, n in sorted(
899 for p, n in sorted(
900 itertools.chain(self._dirs.items(), self._files.items())
900 itertools.chain(self._dirs.items(), self._files.items())
901 ):
901 ):
902 if p in self._files:
902 if p in self._files:
903 yield self._subpath(p), n, self._flags.get(p, b'')
903 yield self._subpath(p), n, self._flags.get(p, b'')
904 else:
904 else:
905 for x in n.iterentries():
905 for x in n.iterentries():
906 yield x
906 yield x
907
907
908 def items(self):
908 def items(self):
909 self._load()
909 self._load()
910 self._loadalllazy()
910 self._loadalllazy()
911 for p, n in sorted(
911 for p, n in sorted(
912 itertools.chain(self._dirs.items(), self._files.items())
912 itertools.chain(self._dirs.items(), self._files.items())
913 ):
913 ):
914 if p in self._files:
914 if p in self._files:
915 yield self._subpath(p), n
915 yield self._subpath(p), n
916 else:
916 else:
917 for f, sn in pycompat.iteritems(n):
917 for f, sn in pycompat.iteritems(n):
918 yield f, sn
918 yield f, sn
919
919
920 iteritems = items
920 iteritems = items
921
921
922 def iterkeys(self):
922 def iterkeys(self):
923 self._load()
923 self._load()
924 self._loadalllazy()
924 self._loadalllazy()
925 for p in sorted(itertools.chain(self._dirs, self._files)):
925 for p in sorted(itertools.chain(self._dirs, self._files)):
926 if p in self._files:
926 if p in self._files:
927 yield self._subpath(p)
927 yield self._subpath(p)
928 else:
928 else:
929 for f in self._dirs[p]:
929 for f in self._dirs[p]:
930 yield f
930 yield f
931
931
932 def keys(self):
932 def keys(self):
933 return list(self.iterkeys())
933 return list(self.iterkeys())
934
934
935 def __iter__(self):
935 def __iter__(self):
936 return self.iterkeys()
936 return self.iterkeys()
937
937
938 def __contains__(self, f):
938 def __contains__(self, f):
939 if f is None:
939 if f is None:
940 return False
940 return False
941 self._load()
941 self._load()
942 dir, subpath = _splittopdir(f)
942 dir, subpath = _splittopdir(f)
943 if dir:
943 if dir:
944 self._loadlazy(dir)
944 self._loadlazy(dir)
945
945
946 if dir not in self._dirs:
946 if dir not in self._dirs:
947 return False
947 return False
948
948
949 return self._dirs[dir].__contains__(subpath)
949 return self._dirs[dir].__contains__(subpath)
950 else:
950 else:
951 return f in self._files
951 return f in self._files
952
952
953 def get(self, f, default=None):
953 def get(self, f, default=None):
954 self._load()
954 self._load()
955 dir, subpath = _splittopdir(f)
955 dir, subpath = _splittopdir(f)
956 if dir:
956 if dir:
957 self._loadlazy(dir)
957 self._loadlazy(dir)
958
958
959 if dir not in self._dirs:
959 if dir not in self._dirs:
960 return default
960 return default
961 return self._dirs[dir].get(subpath, default)
961 return self._dirs[dir].get(subpath, default)
962 else:
962 else:
963 return self._files.get(f, default)
963 return self._files.get(f, default)
964
964
965 def __getitem__(self, f):
965 def __getitem__(self, f):
966 self._load()
966 self._load()
967 dir, subpath = _splittopdir(f)
967 dir, subpath = _splittopdir(f)
968 if dir:
968 if dir:
969 self._loadlazy(dir)
969 self._loadlazy(dir)
970
970
971 return self._dirs[dir].__getitem__(subpath)
971 return self._dirs[dir].__getitem__(subpath)
972 else:
972 else:
973 return self._files[f]
973 return self._files[f]
974
974
975 def flags(self, f):
975 def flags(self, f):
976 self._load()
976 self._load()
977 dir, subpath = _splittopdir(f)
977 dir, subpath = _splittopdir(f)
978 if dir:
978 if dir:
979 self._loadlazy(dir)
979 self._loadlazy(dir)
980
980
981 if dir not in self._dirs:
981 if dir not in self._dirs:
982 return b''
982 return b''
983 return self._dirs[dir].flags(subpath)
983 return self._dirs[dir].flags(subpath)
984 else:
984 else:
985 if f in self._lazydirs or f in self._dirs:
985 if f in self._lazydirs or f in self._dirs:
986 return b''
986 return b''
987 return self._flags.get(f, b'')
987 return self._flags.get(f, b'')
988
988
989 def find(self, f):
989 def find(self, f):
990 self._load()
990 self._load()
991 dir, subpath = _splittopdir(f)
991 dir, subpath = _splittopdir(f)
992 if dir:
992 if dir:
993 self._loadlazy(dir)
993 self._loadlazy(dir)
994
994
995 return self._dirs[dir].find(subpath)
995 return self._dirs[dir].find(subpath)
996 else:
996 else:
997 return self._files[f], self._flags.get(f, b'')
997 return self._files[f], self._flags.get(f, b'')
998
998
999 def __delitem__(self, f):
999 def __delitem__(self, f):
1000 self._load()
1000 self._load()
1001 dir, subpath = _splittopdir(f)
1001 dir, subpath = _splittopdir(f)
1002 if dir:
1002 if dir:
1003 self._loadlazy(dir)
1003 self._loadlazy(dir)
1004
1004
1005 self._dirs[dir].__delitem__(subpath)
1005 self._dirs[dir].__delitem__(subpath)
1006 # If the directory is now empty, remove it
1006 # If the directory is now empty, remove it
1007 if self._dirs[dir]._isempty():
1007 if self._dirs[dir]._isempty():
1008 del self._dirs[dir]
1008 del self._dirs[dir]
1009 else:
1009 else:
1010 del self._files[f]
1010 del self._files[f]
1011 if f in self._flags:
1011 if f in self._flags:
1012 del self._flags[f]
1012 del self._flags[f]
1013 self._dirty = True
1013 self._dirty = True
1014
1014
1015 def __setitem__(self, f, n):
1015 def __setitem__(self, f, n):
1016 assert n is not None
1016 assert n is not None
1017 self._load()
1017 self._load()
1018 dir, subpath = _splittopdir(f)
1018 dir, subpath = _splittopdir(f)
1019 if dir:
1019 if dir:
1020 self._loadlazy(dir)
1020 self._loadlazy(dir)
1021 if dir not in self._dirs:
1021 if dir not in self._dirs:
1022 self._dirs[dir] = treemanifest(self._subpath(dir))
1022 self._dirs[dir] = treemanifest(self._subpath(dir))
1023 self._dirs[dir].__setitem__(subpath, n)
1023 self._dirs[dir].__setitem__(subpath, n)
1024 else:
1024 else:
1025 self._files[f] = n[:21] # to match manifestdict's behavior
1025 self._files[f] = n[:21] # to match manifestdict's behavior
1026 self._dirty = True
1026 self._dirty = True
1027
1027
1028 def _load(self):
1028 def _load(self):
1029 if self._loadfunc is not _noop:
1029 if self._loadfunc is not _noop:
1030 lf, self._loadfunc = self._loadfunc, _noop
1030 lf, self._loadfunc = self._loadfunc, _noop
1031 lf(self)
1031 lf(self)
1032 elif self._copyfunc is not _noop:
1032 elif self._copyfunc is not _noop:
1033 cf, self._copyfunc = self._copyfunc, _noop
1033 cf, self._copyfunc = self._copyfunc, _noop
1034 cf(self)
1034 cf(self)
1035
1035
1036 def setflag(self, f, flags):
1036 def setflag(self, f, flags):
1037 """Set the flags (symlink, executable) for path f."""
1037 """Set the flags (symlink, executable) for path f."""
1038 self._load()
1038 self._load()
1039 dir, subpath = _splittopdir(f)
1039 dir, subpath = _splittopdir(f)
1040 if dir:
1040 if dir:
1041 self._loadlazy(dir)
1041 self._loadlazy(dir)
1042 if dir not in self._dirs:
1042 if dir not in self._dirs:
1043 self._dirs[dir] = treemanifest(self._subpath(dir))
1043 self._dirs[dir] = treemanifest(self._subpath(dir))
1044 self._dirs[dir].setflag(subpath, flags)
1044 self._dirs[dir].setflag(subpath, flags)
1045 else:
1045 else:
1046 self._flags[f] = flags
1046 self._flags[f] = flags
1047 self._dirty = True
1047 self._dirty = True
1048
1048
1049 def copy(self):
1049 def copy(self):
1050 copy = treemanifest(self._dir)
1050 copy = treemanifest(self._dir)
1051 copy._node = self._node
1051 copy._node = self._node
1052 copy._dirty = self._dirty
1052 copy._dirty = self._dirty
1053 if self._copyfunc is _noop:
1053 if self._copyfunc is _noop:
1054
1054
1055 def _copyfunc(s):
1055 def _copyfunc(s):
1056 self._load()
1056 self._load()
1057 s._lazydirs = {
1057 s._lazydirs = {
1058 d: (p, n, r, True)
1058 d: (p, n, r, True)
1059 for d, (p, n, r, c) in pycompat.iteritems(self._lazydirs)
1059 for d, (p, n, r, c) in pycompat.iteritems(self._lazydirs)
1060 }
1060 }
1061 sdirs = s._dirs
1061 sdirs = s._dirs
1062 for d, v in pycompat.iteritems(self._dirs):
1062 for d, v in pycompat.iteritems(self._dirs):
1063 sdirs[d] = v.copy()
1063 sdirs[d] = v.copy()
1064 s._files = dict.copy(self._files)
1064 s._files = dict.copy(self._files)
1065 s._flags = dict.copy(self._flags)
1065 s._flags = dict.copy(self._flags)
1066
1066
1067 if self._loadfunc is _noop:
1067 if self._loadfunc is _noop:
1068 _copyfunc(copy)
1068 _copyfunc(copy)
1069 else:
1069 else:
1070 copy._copyfunc = _copyfunc
1070 copy._copyfunc = _copyfunc
1071 else:
1071 else:
1072 copy._copyfunc = self._copyfunc
1072 copy._copyfunc = self._copyfunc
1073 return copy
1073 return copy
1074
1074
1075 def filesnotin(self, m2, match=None):
1075 def filesnotin(self, m2, match=None):
1076 '''Set of files in this manifest that are not in the other'''
1076 '''Set of files in this manifest that are not in the other'''
1077 if match and not match.always():
1077 if match and not match.always():
1078 m1 = self.matches(match)
1078 m1 = self._matches(match)
1079 m2 = m2.matches(match)
1079 m2 = m2._matches(match)
1080 return m1.filesnotin(m2)
1080 return m1.filesnotin(m2)
1081
1081
1082 files = set()
1082 files = set()
1083
1083
1084 def _filesnotin(t1, t2):
1084 def _filesnotin(t1, t2):
1085 if t1._node == t2._node and not t1._dirty and not t2._dirty:
1085 if t1._node == t2._node and not t1._dirty and not t2._dirty:
1086 return
1086 return
1087 t1._load()
1087 t1._load()
1088 t2._load()
1088 t2._load()
1089 self._loaddifflazy(t1, t2)
1089 self._loaddifflazy(t1, t2)
1090 for d, m1 in pycompat.iteritems(t1._dirs):
1090 for d, m1 in pycompat.iteritems(t1._dirs):
1091 if d in t2._dirs:
1091 if d in t2._dirs:
1092 m2 = t2._dirs[d]
1092 m2 = t2._dirs[d]
1093 _filesnotin(m1, m2)
1093 _filesnotin(m1, m2)
1094 else:
1094 else:
1095 files.update(m1.iterkeys())
1095 files.update(m1.iterkeys())
1096
1096
1097 for fn in t1._files:
1097 for fn in t1._files:
1098 if fn not in t2._files:
1098 if fn not in t2._files:
1099 files.add(t1._subpath(fn))
1099 files.add(t1._subpath(fn))
1100
1100
1101 _filesnotin(self, m2)
1101 _filesnotin(self, m2)
1102 return files
1102 return files
1103
1103
1104 @propertycache
1104 @propertycache
1105 def _alldirs(self):
1105 def _alldirs(self):
1106 return pathutil.dirs(self)
1106 return pathutil.dirs(self)
1107
1107
1108 def dirs(self):
1108 def dirs(self):
1109 return self._alldirs
1109 return self._alldirs
1110
1110
1111 def hasdir(self, dir):
1111 def hasdir(self, dir):
1112 self._load()
1112 self._load()
1113 topdir, subdir = _splittopdir(dir)
1113 topdir, subdir = _splittopdir(dir)
1114 if topdir:
1114 if topdir:
1115 self._loadlazy(topdir)
1115 self._loadlazy(topdir)
1116 if topdir in self._dirs:
1116 if topdir in self._dirs:
1117 return self._dirs[topdir].hasdir(subdir)
1117 return self._dirs[topdir].hasdir(subdir)
1118 return False
1118 return False
1119 dirslash = dir + b'/'
1119 dirslash = dir + b'/'
1120 return dirslash in self._dirs or dirslash in self._lazydirs
1120 return dirslash in self._dirs or dirslash in self._lazydirs
1121
1121
1122 def walk(self, match):
1122 def walk(self, match):
1123 '''Generates matching file names.
1123 '''Generates matching file names.
1124
1124
1125 Equivalent to manifest.matches(match).iterkeys(), but without creating
1126 an entirely new manifest.
1127
1128 It also reports nonexistent files by marking them bad with match.bad().
1125 It also reports nonexistent files by marking them bad with match.bad().
1129 '''
1126 '''
1130 if match.always():
1127 if match.always():
1131 for f in iter(self):
1128 for f in iter(self):
1132 yield f
1129 yield f
1133 return
1130 return
1134
1131
1135 fset = set(match.files())
1132 fset = set(match.files())
1136
1133
1137 for fn in self._walk(match):
1134 for fn in self._walk(match):
1138 if fn in fset:
1135 if fn in fset:
1139 # specified pattern is the exact name
1136 # specified pattern is the exact name
1140 fset.remove(fn)
1137 fset.remove(fn)
1141 yield fn
1138 yield fn
1142
1139
1143 # for dirstate.walk, files=[''] means "walk the whole tree".
1140 # for dirstate.walk, files=[''] means "walk the whole tree".
1144 # follow that here, too
1141 # follow that here, too
1145 fset.discard(b'')
1142 fset.discard(b'')
1146
1143
1147 for fn in sorted(fset):
1144 for fn in sorted(fset):
1148 if not self.hasdir(fn):
1145 if not self.hasdir(fn):
1149 match.bad(fn, None)
1146 match.bad(fn, None)
1150
1147
1151 def _walk(self, match):
1148 def _walk(self, match):
1152 '''Recursively generates matching file names for walk().'''
1149 '''Recursively generates matching file names for walk().'''
1153 visit = match.visitchildrenset(self._dir[:-1])
1150 visit = match.visitchildrenset(self._dir[:-1])
1154 if not visit:
1151 if not visit:
1155 return
1152 return
1156
1153
1157 # yield this dir's files and walk its submanifests
1154 # yield this dir's files and walk its submanifests
1158 self._load()
1155 self._load()
1159 visit = self._loadchildrensetlazy(visit)
1156 visit = self._loadchildrensetlazy(visit)
1160 for p in sorted(list(self._dirs) + list(self._files)):
1157 for p in sorted(list(self._dirs) + list(self._files)):
1161 if p in self._files:
1158 if p in self._files:
1162 fullp = self._subpath(p)
1159 fullp = self._subpath(p)
1163 if match(fullp):
1160 if match(fullp):
1164 yield fullp
1161 yield fullp
1165 else:
1162 else:
1166 if not visit or p[:-1] in visit:
1163 if not visit or p[:-1] in visit:
1167 for f in self._dirs[p]._walk(match):
1164 for f in self._dirs[p]._walk(match):
1168 yield f
1165 yield f
1169
1166
1170 def matches(self, match):
1171 '''generate a new manifest filtered by the match argument'''
1172 if match.always():
1173 return self.copy()
1174
1175 return self._matches(match)
1176
1177 def _matches(self, match):
1167 def _matches(self, match):
1178 '''recursively generate a new manifest filtered by the match argument.
1168 '''recursively generate a new manifest filtered by the match argument.
1179 '''
1169 '''
1170 if match.always():
1171 return self.copy()
1172 return self._matches_inner(match)
1173
1174 def _matches_inner(self, match):
1175 if match.always():
1176 return self.copy()
1180
1177
1181 visit = match.visitchildrenset(self._dir[:-1])
1178 visit = match.visitchildrenset(self._dir[:-1])
1182 if visit == b'all':
1179 if visit == b'all':
1183 return self.copy()
1180 return self.copy()
1184 ret = treemanifest(self._dir)
1181 ret = treemanifest(self._dir)
1185 if not visit:
1182 if not visit:
1186 return ret
1183 return ret
1187
1184
1188 self._load()
1185 self._load()
1189 for fn in self._files:
1186 for fn in self._files:
1190 # While visitchildrenset *usually* lists only subdirs, this is
1187 # While visitchildrenset *usually* lists only subdirs, this is
1191 # actually up to the matcher and may have some files in the set().
1188 # actually up to the matcher and may have some files in the set().
1192 # If visit == 'this', we should obviously look at the files in this
1189 # If visit == 'this', we should obviously look at the files in this
1193 # directory; if visit is a set, and fn is in it, we should inspect
1190 # directory; if visit is a set, and fn is in it, we should inspect
1194 # fn (but no need to inspect things not in the set).
1191 # fn (but no need to inspect things not in the set).
1195 if visit != b'this' and fn not in visit:
1192 if visit != b'this' and fn not in visit:
1196 continue
1193 continue
1197 fullp = self._subpath(fn)
1194 fullp = self._subpath(fn)
1198 # visitchildrenset isn't perfect, we still need to call the regular
1195 # visitchildrenset isn't perfect, we still need to call the regular
1199 # matcher code to further filter results.
1196 # matcher code to further filter results.
1200 if not match(fullp):
1197 if not match(fullp):
1201 continue
1198 continue
1202 ret._files[fn] = self._files[fn]
1199 ret._files[fn] = self._files[fn]
1203 if fn in self._flags:
1200 if fn in self._flags:
1204 ret._flags[fn] = self._flags[fn]
1201 ret._flags[fn] = self._flags[fn]
1205
1202
1206 visit = self._loadchildrensetlazy(visit)
1203 visit = self._loadchildrensetlazy(visit)
1207 for dir, subm in pycompat.iteritems(self._dirs):
1204 for dir, subm in pycompat.iteritems(self._dirs):
1208 if visit and dir[:-1] not in visit:
1205 if visit and dir[:-1] not in visit:
1209 continue
1206 continue
1210 m = subm._matches(match)
1207 m = subm._matches_inner(match)
1211 if not m._isempty():
1208 if not m._isempty():
1212 ret._dirs[dir] = m
1209 ret._dirs[dir] = m
1213
1210
1214 if not ret._isempty():
1211 if not ret._isempty():
1215 ret._dirty = True
1212 ret._dirty = True
1216 return ret
1213 return ret
1217
1214
1218 def diff(self, m2, match=None, clean=False):
1215 def diff(self, m2, match=None, clean=False):
1219 '''Finds changes between the current manifest and m2.
1216 '''Finds changes between the current manifest and m2.
1220
1217
1221 Args:
1218 Args:
1222 m2: the manifest to which this manifest should be compared.
1219 m2: the manifest to which this manifest should be compared.
1223 clean: if true, include files unchanged between these manifests
1220 clean: if true, include files unchanged between these manifests
1224 with a None value in the returned dictionary.
1221 with a None value in the returned dictionary.
1225
1222
1226 The result is returned as a dict with filename as key and
1223 The result is returned as a dict with filename as key and
1227 values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
1224 values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
1228 nodeid in the current/other manifest and fl1/fl2 is the flag
1225 nodeid in the current/other manifest and fl1/fl2 is the flag
1229 in the current/other manifest. Where the file does not exist,
1226 in the current/other manifest. Where the file does not exist,
1230 the nodeid will be None and the flags will be the empty
1227 the nodeid will be None and the flags will be the empty
1231 string.
1228 string.
1232 '''
1229 '''
1233 if match and not match.always():
1230 if match and not match.always():
1234 m1 = self.matches(match)
1231 m1 = self._matches(match)
1235 m2 = m2.matches(match)
1232 m2 = m2._matches(match)
1236 return m1.diff(m2, clean=clean)
1233 return m1.diff(m2, clean=clean)
1237 result = {}
1234 result = {}
1238 emptytree = treemanifest()
1235 emptytree = treemanifest()
1239
1236
1240 def _iterativediff(t1, t2, stack):
1237 def _iterativediff(t1, t2, stack):
1241 """compares two tree manifests and append new tree-manifests which
1238 """compares two tree manifests and append new tree-manifests which
1242 needs to be compared to stack"""
1239 needs to be compared to stack"""
1243 if t1._node == t2._node and not t1._dirty and not t2._dirty:
1240 if t1._node == t2._node and not t1._dirty and not t2._dirty:
1244 return
1241 return
1245 t1._load()
1242 t1._load()
1246 t2._load()
1243 t2._load()
1247 self._loaddifflazy(t1, t2)
1244 self._loaddifflazy(t1, t2)
1248
1245
1249 for d, m1 in pycompat.iteritems(t1._dirs):
1246 for d, m1 in pycompat.iteritems(t1._dirs):
1250 m2 = t2._dirs.get(d, emptytree)
1247 m2 = t2._dirs.get(d, emptytree)
1251 stack.append((m1, m2))
1248 stack.append((m1, m2))
1252
1249
1253 for d, m2 in pycompat.iteritems(t2._dirs):
1250 for d, m2 in pycompat.iteritems(t2._dirs):
1254 if d not in t1._dirs:
1251 if d not in t1._dirs:
1255 stack.append((emptytree, m2))
1252 stack.append((emptytree, m2))
1256
1253
1257 for fn, n1 in pycompat.iteritems(t1._files):
1254 for fn, n1 in pycompat.iteritems(t1._files):
1258 fl1 = t1._flags.get(fn, b'')
1255 fl1 = t1._flags.get(fn, b'')
1259 n2 = t2._files.get(fn, None)
1256 n2 = t2._files.get(fn, None)
1260 fl2 = t2._flags.get(fn, b'')
1257 fl2 = t2._flags.get(fn, b'')
1261 if n1 != n2 or fl1 != fl2:
1258 if n1 != n2 or fl1 != fl2:
1262 result[t1._subpath(fn)] = ((n1, fl1), (n2, fl2))
1259 result[t1._subpath(fn)] = ((n1, fl1), (n2, fl2))
1263 elif clean:
1260 elif clean:
1264 result[t1._subpath(fn)] = None
1261 result[t1._subpath(fn)] = None
1265
1262
1266 for fn, n2 in pycompat.iteritems(t2._files):
1263 for fn, n2 in pycompat.iteritems(t2._files):
1267 if fn not in t1._files:
1264 if fn not in t1._files:
1268 fl2 = t2._flags.get(fn, b'')
1265 fl2 = t2._flags.get(fn, b'')
1269 result[t2._subpath(fn)] = ((None, b''), (n2, fl2))
1266 result[t2._subpath(fn)] = ((None, b''), (n2, fl2))
1270
1267
1271 stackls = []
1268 stackls = []
1272 _iterativediff(self, m2, stackls)
1269 _iterativediff(self, m2, stackls)
1273 while stackls:
1270 while stackls:
1274 t1, t2 = stackls.pop()
1271 t1, t2 = stackls.pop()
1275 # stackls is populated in the function call
1272 # stackls is populated in the function call
1276 _iterativediff(t1, t2, stackls)
1273 _iterativediff(t1, t2, stackls)
1277 return result
1274 return result
1278
1275
1279 def unmodifiedsince(self, m2):
1276 def unmodifiedsince(self, m2):
1280 return not self._dirty and not m2._dirty and self._node == m2._node
1277 return not self._dirty and not m2._dirty and self._node == m2._node
1281
1278
1282 def parse(self, text, readsubtree):
1279 def parse(self, text, readsubtree):
1283 selflazy = self._lazydirs
1280 selflazy = self._lazydirs
1284 subpath = self._subpath
1281 subpath = self._subpath
1285 for f, n, fl in _parse(text):
1282 for f, n, fl in _parse(text):
1286 if fl == b't':
1283 if fl == b't':
1287 f = f + b'/'
1284 f = f + b'/'
1288 # False below means "doesn't need to be copied" and can use the
1285 # False below means "doesn't need to be copied" and can use the
1289 # cached value from readsubtree directly.
1286 # cached value from readsubtree directly.
1290 selflazy[f] = (subpath(f), n, readsubtree, False)
1287 selflazy[f] = (subpath(f), n, readsubtree, False)
1291 elif b'/' in f:
1288 elif b'/' in f:
1292 # This is a flat manifest, so use __setitem__ and setflag rather
1289 # This is a flat manifest, so use __setitem__ and setflag rather
1293 # than assigning directly to _files and _flags, so we can
1290 # than assigning directly to _files and _flags, so we can
1294 # assign a path in a subdirectory, and to mark dirty (compared
1291 # assign a path in a subdirectory, and to mark dirty (compared
1295 # to nullid).
1292 # to nullid).
1296 self[f] = n
1293 self[f] = n
1297 if fl:
1294 if fl:
1298 self.setflag(f, fl)
1295 self.setflag(f, fl)
1299 else:
1296 else:
1300 # Assigning to _files and _flags avoids marking as dirty,
1297 # Assigning to _files and _flags avoids marking as dirty,
1301 # and should be a little faster.
1298 # and should be a little faster.
1302 self._files[f] = n
1299 self._files[f] = n
1303 if fl:
1300 if fl:
1304 self._flags[f] = fl
1301 self._flags[f] = fl
1305
1302
1306 def text(self):
1303 def text(self):
1307 """Get the full data of this manifest as a bytestring."""
1304 """Get the full data of this manifest as a bytestring."""
1308 self._load()
1305 self._load()
1309 return _text(self.iterentries())
1306 return _text(self.iterentries())
1310
1307
1311 def dirtext(self):
1308 def dirtext(self):
1312 """Get the full data of this directory as a bytestring. Make sure that
1309 """Get the full data of this directory as a bytestring. Make sure that
1313 any submanifests have been written first, so their nodeids are correct.
1310 any submanifests have been written first, so their nodeids are correct.
1314 """
1311 """
1315 self._load()
1312 self._load()
1316 flags = self.flags
1313 flags = self.flags
1317 lazydirs = [
1314 lazydirs = [
1318 (d[:-1], v[1], b't') for d, v in pycompat.iteritems(self._lazydirs)
1315 (d[:-1], v[1], b't') for d, v in pycompat.iteritems(self._lazydirs)
1319 ]
1316 ]
1320 dirs = [(d[:-1], self._dirs[d]._node, b't') for d in self._dirs]
1317 dirs = [(d[:-1], self._dirs[d]._node, b't') for d in self._dirs]
1321 files = [(f, self._files[f], flags(f)) for f in self._files]
1318 files = [(f, self._files[f], flags(f)) for f in self._files]
1322 return _text(sorted(dirs + files + lazydirs))
1319 return _text(sorted(dirs + files + lazydirs))
1323
1320
1324 def read(self, gettext, readsubtree):
1321 def read(self, gettext, readsubtree):
1325 def _load_for_read(s):
1322 def _load_for_read(s):
1326 s.parse(gettext(), readsubtree)
1323 s.parse(gettext(), readsubtree)
1327 s._dirty = False
1324 s._dirty = False
1328
1325
1329 self._loadfunc = _load_for_read
1326 self._loadfunc = _load_for_read
1330
1327
1331 def writesubtrees(self, m1, m2, writesubtree, match):
1328 def writesubtrees(self, m1, m2, writesubtree, match):
1332 self._load() # for consistency; should never have any effect here
1329 self._load() # for consistency; should never have any effect here
1333 m1._load()
1330 m1._load()
1334 m2._load()
1331 m2._load()
1335 emptytree = treemanifest()
1332 emptytree = treemanifest()
1336
1333
1337 def getnode(m, d):
1334 def getnode(m, d):
1338 ld = m._lazydirs.get(d)
1335 ld = m._lazydirs.get(d)
1339 if ld:
1336 if ld:
1340 return ld[1]
1337 return ld[1]
1341 return m._dirs.get(d, emptytree)._node
1338 return m._dirs.get(d, emptytree)._node
1342
1339
1343 # let's skip investigating things that `match` says we do not need.
1340 # let's skip investigating things that `match` says we do not need.
1344 visit = match.visitchildrenset(self._dir[:-1])
1341 visit = match.visitchildrenset(self._dir[:-1])
1345 visit = self._loadchildrensetlazy(visit)
1342 visit = self._loadchildrensetlazy(visit)
1346 if visit == b'this' or visit == b'all':
1343 if visit == b'this' or visit == b'all':
1347 visit = None
1344 visit = None
1348 for d, subm in pycompat.iteritems(self._dirs):
1345 for d, subm in pycompat.iteritems(self._dirs):
1349 if visit and d[:-1] not in visit:
1346 if visit and d[:-1] not in visit:
1350 continue
1347 continue
1351 subp1 = getnode(m1, d)
1348 subp1 = getnode(m1, d)
1352 subp2 = getnode(m2, d)
1349 subp2 = getnode(m2, d)
1353 if subp1 == nullid:
1350 if subp1 == nullid:
1354 subp1, subp2 = subp2, subp1
1351 subp1, subp2 = subp2, subp1
1355 writesubtree(subm, subp1, subp2, match)
1352 writesubtree(subm, subp1, subp2, match)
1356
1353
1357 def walksubtrees(self, matcher=None):
1354 def walksubtrees(self, matcher=None):
1358 """Returns an iterator of the subtrees of this manifest, including this
1355 """Returns an iterator of the subtrees of this manifest, including this
1359 manifest itself.
1356 manifest itself.
1360
1357
1361 If `matcher` is provided, it only returns subtrees that match.
1358 If `matcher` is provided, it only returns subtrees that match.
1362 """
1359 """
1363 if matcher and not matcher.visitdir(self._dir[:-1]):
1360 if matcher and not matcher.visitdir(self._dir[:-1]):
1364 return
1361 return
1365 if not matcher or matcher(self._dir[:-1]):
1362 if not matcher or matcher(self._dir[:-1]):
1366 yield self
1363 yield self
1367
1364
1368 self._load()
1365 self._load()
1369 # OPT: use visitchildrenset to avoid loading everything.
1366 # OPT: use visitchildrenset to avoid loading everything.
1370 self._loadalllazy()
1367 self._loadalllazy()
1371 for d, subm in pycompat.iteritems(self._dirs):
1368 for d, subm in pycompat.iteritems(self._dirs):
1372 for subtree in subm.walksubtrees(matcher=matcher):
1369 for subtree in subm.walksubtrees(matcher=matcher):
1373 yield subtree
1370 yield subtree
1374
1371
1375
1372
1376 class manifestfulltextcache(util.lrucachedict):
1373 class manifestfulltextcache(util.lrucachedict):
1377 """File-backed LRU cache for the manifest cache
1374 """File-backed LRU cache for the manifest cache
1378
1375
1379 File consists of entries, up to EOF:
1376 File consists of entries, up to EOF:
1380
1377
1381 - 20 bytes node, 4 bytes length, <length> manifest data
1378 - 20 bytes node, 4 bytes length, <length> manifest data
1382
1379
1383 These are written in reverse cache order (oldest to newest).
1380 These are written in reverse cache order (oldest to newest).
1384
1381
1385 """
1382 """
1386
1383
1387 _file = b'manifestfulltextcache'
1384 _file = b'manifestfulltextcache'
1388
1385
1389 def __init__(self, max):
1386 def __init__(self, max):
1390 super(manifestfulltextcache, self).__init__(max)
1387 super(manifestfulltextcache, self).__init__(max)
1391 self._dirty = False
1388 self._dirty = False
1392 self._read = False
1389 self._read = False
1393 self._opener = None
1390 self._opener = None
1394
1391
1395 def read(self):
1392 def read(self):
1396 if self._read or self._opener is None:
1393 if self._read or self._opener is None:
1397 return
1394 return
1398
1395
1399 try:
1396 try:
1400 with self._opener(self._file) as fp:
1397 with self._opener(self._file) as fp:
1401 set = super(manifestfulltextcache, self).__setitem__
1398 set = super(manifestfulltextcache, self).__setitem__
1402 # ignore trailing data, this is a cache, corruption is skipped
1399 # ignore trailing data, this is a cache, corruption is skipped
1403 while True:
1400 while True:
1404 node = fp.read(20)
1401 node = fp.read(20)
1405 if len(node) < 20:
1402 if len(node) < 20:
1406 break
1403 break
1407 try:
1404 try:
1408 size = struct.unpack(b'>L', fp.read(4))[0]
1405 size = struct.unpack(b'>L', fp.read(4))[0]
1409 except struct.error:
1406 except struct.error:
1410 break
1407 break
1411 value = bytearray(fp.read(size))
1408 value = bytearray(fp.read(size))
1412 if len(value) != size:
1409 if len(value) != size:
1413 break
1410 break
1414 set(node, value)
1411 set(node, value)
1415 except IOError:
1412 except IOError:
1416 # the file is allowed to be missing
1413 # the file is allowed to be missing
1417 pass
1414 pass
1418
1415
1419 self._read = True
1416 self._read = True
1420 self._dirty = False
1417 self._dirty = False
1421
1418
1422 def write(self):
1419 def write(self):
1423 if not self._dirty or self._opener is None:
1420 if not self._dirty or self._opener is None:
1424 return
1421 return
1425 # rotate backwards to the first used node
1422 # rotate backwards to the first used node
1426 with self._opener(
1423 with self._opener(
1427 self._file, b'w', atomictemp=True, checkambig=True
1424 self._file, b'w', atomictemp=True, checkambig=True
1428 ) as fp:
1425 ) as fp:
1429 node = self._head.prev
1426 node = self._head.prev
1430 while True:
1427 while True:
1431 if node.key in self._cache:
1428 if node.key in self._cache:
1432 fp.write(node.key)
1429 fp.write(node.key)
1433 fp.write(struct.pack(b'>L', len(node.value)))
1430 fp.write(struct.pack(b'>L', len(node.value)))
1434 fp.write(node.value)
1431 fp.write(node.value)
1435 if node is self._head:
1432 if node is self._head:
1436 break
1433 break
1437 node = node.prev
1434 node = node.prev
1438
1435
1439 def __len__(self):
1436 def __len__(self):
1440 if not self._read:
1437 if not self._read:
1441 self.read()
1438 self.read()
1442 return super(manifestfulltextcache, self).__len__()
1439 return super(manifestfulltextcache, self).__len__()
1443
1440
1444 def __contains__(self, k):
1441 def __contains__(self, k):
1445 if not self._read:
1442 if not self._read:
1446 self.read()
1443 self.read()
1447 return super(manifestfulltextcache, self).__contains__(k)
1444 return super(manifestfulltextcache, self).__contains__(k)
1448
1445
1449 def __iter__(self):
1446 def __iter__(self):
1450 if not self._read:
1447 if not self._read:
1451 self.read()
1448 self.read()
1452 return super(manifestfulltextcache, self).__iter__()
1449 return super(manifestfulltextcache, self).__iter__()
1453
1450
1454 def __getitem__(self, k):
1451 def __getitem__(self, k):
1455 if not self._read:
1452 if not self._read:
1456 self.read()
1453 self.read()
1457 # the cache lru order can change on read
1454 # the cache lru order can change on read
1458 setdirty = self._cache.get(k) is not self._head
1455 setdirty = self._cache.get(k) is not self._head
1459 value = super(manifestfulltextcache, self).__getitem__(k)
1456 value = super(manifestfulltextcache, self).__getitem__(k)
1460 if setdirty:
1457 if setdirty:
1461 self._dirty = True
1458 self._dirty = True
1462 return value
1459 return value
1463
1460
1464 def __setitem__(self, k, v):
1461 def __setitem__(self, k, v):
1465 if not self._read:
1462 if not self._read:
1466 self.read()
1463 self.read()
1467 super(manifestfulltextcache, self).__setitem__(k, v)
1464 super(manifestfulltextcache, self).__setitem__(k, v)
1468 self._dirty = True
1465 self._dirty = True
1469
1466
1470 def __delitem__(self, k):
1467 def __delitem__(self, k):
1471 if not self._read:
1468 if not self._read:
1472 self.read()
1469 self.read()
1473 super(manifestfulltextcache, self).__delitem__(k)
1470 super(manifestfulltextcache, self).__delitem__(k)
1474 self._dirty = True
1471 self._dirty = True
1475
1472
1476 def get(self, k, default=None):
1473 def get(self, k, default=None):
1477 if not self._read:
1474 if not self._read:
1478 self.read()
1475 self.read()
1479 return super(manifestfulltextcache, self).get(k, default=default)
1476 return super(manifestfulltextcache, self).get(k, default=default)
1480
1477
1481 def clear(self, clear_persisted_data=False):
1478 def clear(self, clear_persisted_data=False):
1482 super(manifestfulltextcache, self).clear()
1479 super(manifestfulltextcache, self).clear()
1483 if clear_persisted_data:
1480 if clear_persisted_data:
1484 self._dirty = True
1481 self._dirty = True
1485 self.write()
1482 self.write()
1486 self._read = False
1483 self._read = False
1487
1484
1488
1485
1489 # and upper bound of what we expect from compression
1486 # and upper bound of what we expect from compression
1490 # (real live value seems to be "3")
1487 # (real live value seems to be "3")
1491 MAXCOMPRESSION = 3
1488 MAXCOMPRESSION = 3
1492
1489
1493
1490
1494 @interfaceutil.implementer(repository.imanifeststorage)
1491 @interfaceutil.implementer(repository.imanifeststorage)
1495 class manifestrevlog(object):
1492 class manifestrevlog(object):
1496 '''A revlog that stores manifest texts. This is responsible for caching the
1493 '''A revlog that stores manifest texts. This is responsible for caching the
1497 full-text manifest contents.
1494 full-text manifest contents.
1498 '''
1495 '''
1499
1496
1500 def __init__(
1497 def __init__(
1501 self,
1498 self,
1502 opener,
1499 opener,
1503 tree=b'',
1500 tree=b'',
1504 dirlogcache=None,
1501 dirlogcache=None,
1505 indexfile=None,
1502 indexfile=None,
1506 treemanifest=False,
1503 treemanifest=False,
1507 ):
1504 ):
1508 """Constructs a new manifest revlog
1505 """Constructs a new manifest revlog
1509
1506
1510 `indexfile` - used by extensions to have two manifests at once, like
1507 `indexfile` - used by extensions to have two manifests at once, like
1511 when transitioning between flatmanifeset and treemanifests.
1508 when transitioning between flatmanifeset and treemanifests.
1512
1509
1513 `treemanifest` - used to indicate this is a tree manifest revlog. Opener
1510 `treemanifest` - used to indicate this is a tree manifest revlog. Opener
1514 options can also be used to make this a tree manifest revlog. The opener
1511 options can also be used to make this a tree manifest revlog. The opener
1515 option takes precedence, so if it is set to True, we ignore whatever
1512 option takes precedence, so if it is set to True, we ignore whatever
1516 value is passed in to the constructor.
1513 value is passed in to the constructor.
1517 """
1514 """
1518 # During normal operations, we expect to deal with not more than four
1515 # During normal operations, we expect to deal with not more than four
1519 # revs at a time (such as during commit --amend). When rebasing large
1516 # revs at a time (such as during commit --amend). When rebasing large
1520 # stacks of commits, the number can go up, hence the config knob below.
1517 # stacks of commits, the number can go up, hence the config knob below.
1521 cachesize = 4
1518 cachesize = 4
1522 optiontreemanifest = False
1519 optiontreemanifest = False
1523 opts = getattr(opener, 'options', None)
1520 opts = getattr(opener, 'options', None)
1524 if opts is not None:
1521 if opts is not None:
1525 cachesize = opts.get(b'manifestcachesize', cachesize)
1522 cachesize = opts.get(b'manifestcachesize', cachesize)
1526 optiontreemanifest = opts.get(b'treemanifest', False)
1523 optiontreemanifest = opts.get(b'treemanifest', False)
1527
1524
1528 self._treeondisk = optiontreemanifest or treemanifest
1525 self._treeondisk = optiontreemanifest or treemanifest
1529
1526
1530 self._fulltextcache = manifestfulltextcache(cachesize)
1527 self._fulltextcache = manifestfulltextcache(cachesize)
1531
1528
1532 if tree:
1529 if tree:
1533 assert self._treeondisk, b'opts is %r' % opts
1530 assert self._treeondisk, b'opts is %r' % opts
1534
1531
1535 if indexfile is None:
1532 if indexfile is None:
1536 indexfile = b'00manifest.i'
1533 indexfile = b'00manifest.i'
1537 if tree:
1534 if tree:
1538 indexfile = b"meta/" + tree + indexfile
1535 indexfile = b"meta/" + tree + indexfile
1539
1536
1540 self.tree = tree
1537 self.tree = tree
1541
1538
1542 # The dirlogcache is kept on the root manifest log
1539 # The dirlogcache is kept on the root manifest log
1543 if tree:
1540 if tree:
1544 self._dirlogcache = dirlogcache
1541 self._dirlogcache = dirlogcache
1545 else:
1542 else:
1546 self._dirlogcache = {b'': self}
1543 self._dirlogcache = {b'': self}
1547
1544
1548 self._revlog = revlog.revlog(
1545 self._revlog = revlog.revlog(
1549 opener,
1546 opener,
1550 indexfile,
1547 indexfile,
1551 # only root indexfile is cached
1548 # only root indexfile is cached
1552 checkambig=not bool(tree),
1549 checkambig=not bool(tree),
1553 mmaplargeindex=True,
1550 mmaplargeindex=True,
1554 upperboundcomp=MAXCOMPRESSION,
1551 upperboundcomp=MAXCOMPRESSION,
1555 )
1552 )
1556
1553
1557 self.index = self._revlog.index
1554 self.index = self._revlog.index
1558 self.version = self._revlog.version
1555 self.version = self._revlog.version
1559 self._generaldelta = self._revlog._generaldelta
1556 self._generaldelta = self._revlog._generaldelta
1560
1557
1561 def _setupmanifestcachehooks(self, repo):
1558 def _setupmanifestcachehooks(self, repo):
1562 """Persist the manifestfulltextcache on lock release"""
1559 """Persist the manifestfulltextcache on lock release"""
1563 if not util.safehasattr(repo, b'_wlockref'):
1560 if not util.safehasattr(repo, b'_wlockref'):
1564 return
1561 return
1565
1562
1566 self._fulltextcache._opener = repo.wcachevfs
1563 self._fulltextcache._opener = repo.wcachevfs
1567 if repo._currentlock(repo._wlockref) is None:
1564 if repo._currentlock(repo._wlockref) is None:
1568 return
1565 return
1569
1566
1570 reporef = weakref.ref(repo)
1567 reporef = weakref.ref(repo)
1571 manifestrevlogref = weakref.ref(self)
1568 manifestrevlogref = weakref.ref(self)
1572
1569
1573 def persistmanifestcache(success):
1570 def persistmanifestcache(success):
1574 # Repo is in an unknown state, do not persist.
1571 # Repo is in an unknown state, do not persist.
1575 if not success:
1572 if not success:
1576 return
1573 return
1577
1574
1578 repo = reporef()
1575 repo = reporef()
1579 self = manifestrevlogref()
1576 self = manifestrevlogref()
1580 if repo is None or self is None:
1577 if repo is None or self is None:
1581 return
1578 return
1582 if repo.manifestlog.getstorage(b'') is not self:
1579 if repo.manifestlog.getstorage(b'') is not self:
1583 # there's a different manifest in play now, abort
1580 # there's a different manifest in play now, abort
1584 return
1581 return
1585 self._fulltextcache.write()
1582 self._fulltextcache.write()
1586
1583
1587 repo._afterlock(persistmanifestcache)
1584 repo._afterlock(persistmanifestcache)
1588
1585
1589 @property
1586 @property
1590 def fulltextcache(self):
1587 def fulltextcache(self):
1591 return self._fulltextcache
1588 return self._fulltextcache
1592
1589
1593 def clearcaches(self, clear_persisted_data=False):
1590 def clearcaches(self, clear_persisted_data=False):
1594 self._revlog.clearcaches()
1591 self._revlog.clearcaches()
1595 self._fulltextcache.clear(clear_persisted_data=clear_persisted_data)
1592 self._fulltextcache.clear(clear_persisted_data=clear_persisted_data)
1596 self._dirlogcache = {self.tree: self}
1593 self._dirlogcache = {self.tree: self}
1597
1594
1598 def dirlog(self, d):
1595 def dirlog(self, d):
1599 if d:
1596 if d:
1600 assert self._treeondisk
1597 assert self._treeondisk
1601 if d not in self._dirlogcache:
1598 if d not in self._dirlogcache:
1602 mfrevlog = manifestrevlog(
1599 mfrevlog = manifestrevlog(
1603 self.opener, d, self._dirlogcache, treemanifest=self._treeondisk
1600 self.opener, d, self._dirlogcache, treemanifest=self._treeondisk
1604 )
1601 )
1605 self._dirlogcache[d] = mfrevlog
1602 self._dirlogcache[d] = mfrevlog
1606 return self._dirlogcache[d]
1603 return self._dirlogcache[d]
1607
1604
1608 def add(
1605 def add(
1609 self,
1606 self,
1610 m,
1607 m,
1611 transaction,
1608 transaction,
1612 link,
1609 link,
1613 p1,
1610 p1,
1614 p2,
1611 p2,
1615 added,
1612 added,
1616 removed,
1613 removed,
1617 readtree=None,
1614 readtree=None,
1618 match=None,
1615 match=None,
1619 ):
1616 ):
1620 if p1 in self.fulltextcache and util.safehasattr(m, b'fastdelta'):
1617 if p1 in self.fulltextcache and util.safehasattr(m, b'fastdelta'):
1621 # If our first parent is in the manifest cache, we can
1618 # If our first parent is in the manifest cache, we can
1622 # compute a delta here using properties we know about the
1619 # compute a delta here using properties we know about the
1623 # manifest up-front, which may save time later for the
1620 # manifest up-front, which may save time later for the
1624 # revlog layer.
1621 # revlog layer.
1625
1622
1626 _checkforbidden(added)
1623 _checkforbidden(added)
1627 # combine the changed lists into one sorted iterator
1624 # combine the changed lists into one sorted iterator
1628 work = heapq.merge(
1625 work = heapq.merge(
1629 [(x, False) for x in sorted(added)],
1626 [(x, False) for x in sorted(added)],
1630 [(x, True) for x in sorted(removed)],
1627 [(x, True) for x in sorted(removed)],
1631 )
1628 )
1632
1629
1633 arraytext, deltatext = m.fastdelta(self.fulltextcache[p1], work)
1630 arraytext, deltatext = m.fastdelta(self.fulltextcache[p1], work)
1634 cachedelta = self._revlog.rev(p1), deltatext
1631 cachedelta = self._revlog.rev(p1), deltatext
1635 text = util.buffer(arraytext)
1632 text = util.buffer(arraytext)
1636 n = self._revlog.addrevision(
1633 n = self._revlog.addrevision(
1637 text, transaction, link, p1, p2, cachedelta
1634 text, transaction, link, p1, p2, cachedelta
1638 )
1635 )
1639 else:
1636 else:
1640 # The first parent manifest isn't already loaded, so we'll
1637 # The first parent manifest isn't already loaded, so we'll
1641 # just encode a fulltext of the manifest and pass that
1638 # just encode a fulltext of the manifest and pass that
1642 # through to the revlog layer, and let it handle the delta
1639 # through to the revlog layer, and let it handle the delta
1643 # process.
1640 # process.
1644 if self._treeondisk:
1641 if self._treeondisk:
1645 assert readtree, b"readtree must be set for treemanifest writes"
1642 assert readtree, b"readtree must be set for treemanifest writes"
1646 assert match, b"match must be specified for treemanifest writes"
1643 assert match, b"match must be specified for treemanifest writes"
1647 m1 = readtree(self.tree, p1)
1644 m1 = readtree(self.tree, p1)
1648 m2 = readtree(self.tree, p2)
1645 m2 = readtree(self.tree, p2)
1649 n = self._addtree(
1646 n = self._addtree(
1650 m, transaction, link, m1, m2, readtree, match=match
1647 m, transaction, link, m1, m2, readtree, match=match
1651 )
1648 )
1652 arraytext = None
1649 arraytext = None
1653 else:
1650 else:
1654 text = m.text()
1651 text = m.text()
1655 n = self._revlog.addrevision(text, transaction, link, p1, p2)
1652 n = self._revlog.addrevision(text, transaction, link, p1, p2)
1656 arraytext = bytearray(text)
1653 arraytext = bytearray(text)
1657
1654
1658 if arraytext is not None:
1655 if arraytext is not None:
1659 self.fulltextcache[n] = arraytext
1656 self.fulltextcache[n] = arraytext
1660
1657
1661 return n
1658 return n
1662
1659
1663 def _addtree(self, m, transaction, link, m1, m2, readtree, match):
1660 def _addtree(self, m, transaction, link, m1, m2, readtree, match):
1664 # If the manifest is unchanged compared to one parent,
1661 # If the manifest is unchanged compared to one parent,
1665 # don't write a new revision
1662 # don't write a new revision
1666 if self.tree != b'' and (
1663 if self.tree != b'' and (
1667 m.unmodifiedsince(m1) or m.unmodifiedsince(m2)
1664 m.unmodifiedsince(m1) or m.unmodifiedsince(m2)
1668 ):
1665 ):
1669 return m.node()
1666 return m.node()
1670
1667
1671 def writesubtree(subm, subp1, subp2, match):
1668 def writesubtree(subm, subp1, subp2, match):
1672 sublog = self.dirlog(subm.dir())
1669 sublog = self.dirlog(subm.dir())
1673 sublog.add(
1670 sublog.add(
1674 subm,
1671 subm,
1675 transaction,
1672 transaction,
1676 link,
1673 link,
1677 subp1,
1674 subp1,
1678 subp2,
1675 subp2,
1679 None,
1676 None,
1680 None,
1677 None,
1681 readtree=readtree,
1678 readtree=readtree,
1682 match=match,
1679 match=match,
1683 )
1680 )
1684
1681
1685 m.writesubtrees(m1, m2, writesubtree, match)
1682 m.writesubtrees(m1, m2, writesubtree, match)
1686 text = m.dirtext()
1683 text = m.dirtext()
1687 n = None
1684 n = None
1688 if self.tree != b'':
1685 if self.tree != b'':
1689 # Double-check whether contents are unchanged to one parent
1686 # Double-check whether contents are unchanged to one parent
1690 if text == m1.dirtext():
1687 if text == m1.dirtext():
1691 n = m1.node()
1688 n = m1.node()
1692 elif text == m2.dirtext():
1689 elif text == m2.dirtext():
1693 n = m2.node()
1690 n = m2.node()
1694
1691
1695 if not n:
1692 if not n:
1696 n = self._revlog.addrevision(
1693 n = self._revlog.addrevision(
1697 text, transaction, link, m1.node(), m2.node()
1694 text, transaction, link, m1.node(), m2.node()
1698 )
1695 )
1699
1696
1700 # Save nodeid so parent manifest can calculate its nodeid
1697 # Save nodeid so parent manifest can calculate its nodeid
1701 m.setnode(n)
1698 m.setnode(n)
1702 return n
1699 return n
1703
1700
1704 def __len__(self):
1701 def __len__(self):
1705 return len(self._revlog)
1702 return len(self._revlog)
1706
1703
1707 def __iter__(self):
1704 def __iter__(self):
1708 return self._revlog.__iter__()
1705 return self._revlog.__iter__()
1709
1706
1710 def rev(self, node):
1707 def rev(self, node):
1711 return self._revlog.rev(node)
1708 return self._revlog.rev(node)
1712
1709
1713 def node(self, rev):
1710 def node(self, rev):
1714 return self._revlog.node(rev)
1711 return self._revlog.node(rev)
1715
1712
1716 def lookup(self, value):
1713 def lookup(self, value):
1717 return self._revlog.lookup(value)
1714 return self._revlog.lookup(value)
1718
1715
1719 def parentrevs(self, rev):
1716 def parentrevs(self, rev):
1720 return self._revlog.parentrevs(rev)
1717 return self._revlog.parentrevs(rev)
1721
1718
1722 def parents(self, node):
1719 def parents(self, node):
1723 return self._revlog.parents(node)
1720 return self._revlog.parents(node)
1724
1721
1725 def linkrev(self, rev):
1722 def linkrev(self, rev):
1726 return self._revlog.linkrev(rev)
1723 return self._revlog.linkrev(rev)
1727
1724
1728 def checksize(self):
1725 def checksize(self):
1729 return self._revlog.checksize()
1726 return self._revlog.checksize()
1730
1727
1731 def revision(self, node, _df=None, raw=False):
1728 def revision(self, node, _df=None, raw=False):
1732 return self._revlog.revision(node, _df=_df, raw=raw)
1729 return self._revlog.revision(node, _df=_df, raw=raw)
1733
1730
1734 def rawdata(self, node, _df=None):
1731 def rawdata(self, node, _df=None):
1735 return self._revlog.rawdata(node, _df=_df)
1732 return self._revlog.rawdata(node, _df=_df)
1736
1733
1737 def revdiff(self, rev1, rev2):
1734 def revdiff(self, rev1, rev2):
1738 return self._revlog.revdiff(rev1, rev2)
1735 return self._revlog.revdiff(rev1, rev2)
1739
1736
1740 def cmp(self, node, text):
1737 def cmp(self, node, text):
1741 return self._revlog.cmp(node, text)
1738 return self._revlog.cmp(node, text)
1742
1739
1743 def deltaparent(self, rev):
1740 def deltaparent(self, rev):
1744 return self._revlog.deltaparent(rev)
1741 return self._revlog.deltaparent(rev)
1745
1742
1746 def emitrevisions(
1743 def emitrevisions(
1747 self,
1744 self,
1748 nodes,
1745 nodes,
1749 nodesorder=None,
1746 nodesorder=None,
1750 revisiondata=False,
1747 revisiondata=False,
1751 assumehaveparentrevisions=False,
1748 assumehaveparentrevisions=False,
1752 deltamode=repository.CG_DELTAMODE_STD,
1749 deltamode=repository.CG_DELTAMODE_STD,
1753 ):
1750 ):
1754 return self._revlog.emitrevisions(
1751 return self._revlog.emitrevisions(
1755 nodes,
1752 nodes,
1756 nodesorder=nodesorder,
1753 nodesorder=nodesorder,
1757 revisiondata=revisiondata,
1754 revisiondata=revisiondata,
1758 assumehaveparentrevisions=assumehaveparentrevisions,
1755 assumehaveparentrevisions=assumehaveparentrevisions,
1759 deltamode=deltamode,
1756 deltamode=deltamode,
1760 )
1757 )
1761
1758
1762 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
1759 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
1763 return self._revlog.addgroup(
1760 return self._revlog.addgroup(
1764 deltas, linkmapper, transaction, addrevisioncb=addrevisioncb
1761 deltas, linkmapper, transaction, addrevisioncb=addrevisioncb
1765 )
1762 )
1766
1763
1767 def rawsize(self, rev):
1764 def rawsize(self, rev):
1768 return self._revlog.rawsize(rev)
1765 return self._revlog.rawsize(rev)
1769
1766
1770 def getstrippoint(self, minlink):
1767 def getstrippoint(self, minlink):
1771 return self._revlog.getstrippoint(minlink)
1768 return self._revlog.getstrippoint(minlink)
1772
1769
1773 def strip(self, minlink, transaction):
1770 def strip(self, minlink, transaction):
1774 return self._revlog.strip(minlink, transaction)
1771 return self._revlog.strip(minlink, transaction)
1775
1772
1776 def files(self):
1773 def files(self):
1777 return self._revlog.files()
1774 return self._revlog.files()
1778
1775
1779 def clone(self, tr, destrevlog, **kwargs):
1776 def clone(self, tr, destrevlog, **kwargs):
1780 if not isinstance(destrevlog, manifestrevlog):
1777 if not isinstance(destrevlog, manifestrevlog):
1781 raise error.ProgrammingError(b'expected manifestrevlog to clone()')
1778 raise error.ProgrammingError(b'expected manifestrevlog to clone()')
1782
1779
1783 return self._revlog.clone(tr, destrevlog._revlog, **kwargs)
1780 return self._revlog.clone(tr, destrevlog._revlog, **kwargs)
1784
1781
1785 def storageinfo(
1782 def storageinfo(
1786 self,
1783 self,
1787 exclusivefiles=False,
1784 exclusivefiles=False,
1788 sharedfiles=False,
1785 sharedfiles=False,
1789 revisionscount=False,
1786 revisionscount=False,
1790 trackedsize=False,
1787 trackedsize=False,
1791 storedsize=False,
1788 storedsize=False,
1792 ):
1789 ):
1793 return self._revlog.storageinfo(
1790 return self._revlog.storageinfo(
1794 exclusivefiles=exclusivefiles,
1791 exclusivefiles=exclusivefiles,
1795 sharedfiles=sharedfiles,
1792 sharedfiles=sharedfiles,
1796 revisionscount=revisionscount,
1793 revisionscount=revisionscount,
1797 trackedsize=trackedsize,
1794 trackedsize=trackedsize,
1798 storedsize=storedsize,
1795 storedsize=storedsize,
1799 )
1796 )
1800
1797
1801 @property
1798 @property
1802 def indexfile(self):
1799 def indexfile(self):
1803 return self._revlog.indexfile
1800 return self._revlog.indexfile
1804
1801
1805 @indexfile.setter
1802 @indexfile.setter
1806 def indexfile(self, value):
1803 def indexfile(self, value):
1807 self._revlog.indexfile = value
1804 self._revlog.indexfile = value
1808
1805
1809 @property
1806 @property
1810 def opener(self):
1807 def opener(self):
1811 return self._revlog.opener
1808 return self._revlog.opener
1812
1809
1813 @opener.setter
1810 @opener.setter
1814 def opener(self, value):
1811 def opener(self, value):
1815 self._revlog.opener = value
1812 self._revlog.opener = value
1816
1813
1817
1814
1818 @interfaceutil.implementer(repository.imanifestlog)
1815 @interfaceutil.implementer(repository.imanifestlog)
1819 class manifestlog(object):
1816 class manifestlog(object):
1820 """A collection class representing the collection of manifest snapshots
1817 """A collection class representing the collection of manifest snapshots
1821 referenced by commits in the repository.
1818 referenced by commits in the repository.
1822
1819
1823 In this situation, 'manifest' refers to the abstract concept of a snapshot
1820 In this situation, 'manifest' refers to the abstract concept of a snapshot
1824 of the list of files in the given commit. Consumers of the output of this
1821 of the list of files in the given commit. Consumers of the output of this
1825 class do not care about the implementation details of the actual manifests
1822 class do not care about the implementation details of the actual manifests
1826 they receive (i.e. tree or flat or lazily loaded, etc)."""
1823 they receive (i.e. tree or flat or lazily loaded, etc)."""
1827
1824
1828 def __init__(self, opener, repo, rootstore, narrowmatch):
1825 def __init__(self, opener, repo, rootstore, narrowmatch):
1829 usetreemanifest = False
1826 usetreemanifest = False
1830 cachesize = 4
1827 cachesize = 4
1831
1828
1832 opts = getattr(opener, 'options', None)
1829 opts = getattr(opener, 'options', None)
1833 if opts is not None:
1830 if opts is not None:
1834 usetreemanifest = opts.get(b'treemanifest', usetreemanifest)
1831 usetreemanifest = opts.get(b'treemanifest', usetreemanifest)
1835 cachesize = opts.get(b'manifestcachesize', cachesize)
1832 cachesize = opts.get(b'manifestcachesize', cachesize)
1836
1833
1837 self._treemanifests = usetreemanifest
1834 self._treemanifests = usetreemanifest
1838
1835
1839 self._rootstore = rootstore
1836 self._rootstore = rootstore
1840 self._rootstore._setupmanifestcachehooks(repo)
1837 self._rootstore._setupmanifestcachehooks(repo)
1841 self._narrowmatch = narrowmatch
1838 self._narrowmatch = narrowmatch
1842
1839
1843 # A cache of the manifestctx or treemanifestctx for each directory
1840 # A cache of the manifestctx or treemanifestctx for each directory
1844 self._dirmancache = {}
1841 self._dirmancache = {}
1845 self._dirmancache[b''] = util.lrucachedict(cachesize)
1842 self._dirmancache[b''] = util.lrucachedict(cachesize)
1846
1843
1847 self._cachesize = cachesize
1844 self._cachesize = cachesize
1848
1845
1849 def __getitem__(self, node):
1846 def __getitem__(self, node):
1850 """Retrieves the manifest instance for the given node. Throws a
1847 """Retrieves the manifest instance for the given node. Throws a
1851 LookupError if not found.
1848 LookupError if not found.
1852 """
1849 """
1853 return self.get(b'', node)
1850 return self.get(b'', node)
1854
1851
1855 def get(self, tree, node, verify=True):
1852 def get(self, tree, node, verify=True):
1856 """Retrieves the manifest instance for the given node. Throws a
1853 """Retrieves the manifest instance for the given node. Throws a
1857 LookupError if not found.
1854 LookupError if not found.
1858
1855
1859 `verify` - if True an exception will be thrown if the node is not in
1856 `verify` - if True an exception will be thrown if the node is not in
1860 the revlog
1857 the revlog
1861 """
1858 """
1862 if node in self._dirmancache.get(tree, ()):
1859 if node in self._dirmancache.get(tree, ()):
1863 return self._dirmancache[tree][node]
1860 return self._dirmancache[tree][node]
1864
1861
1865 if not self._narrowmatch.always():
1862 if not self._narrowmatch.always():
1866 if not self._narrowmatch.visitdir(tree[:-1]):
1863 if not self._narrowmatch.visitdir(tree[:-1]):
1867 return excludeddirmanifestctx(tree, node)
1864 return excludeddirmanifestctx(tree, node)
1868 if tree:
1865 if tree:
1869 if self._rootstore._treeondisk:
1866 if self._rootstore._treeondisk:
1870 if verify:
1867 if verify:
1871 # Side-effect is LookupError is raised if node doesn't
1868 # Side-effect is LookupError is raised if node doesn't
1872 # exist.
1869 # exist.
1873 self.getstorage(tree).rev(node)
1870 self.getstorage(tree).rev(node)
1874
1871
1875 m = treemanifestctx(self, tree, node)
1872 m = treemanifestctx(self, tree, node)
1876 else:
1873 else:
1877 raise error.Abort(
1874 raise error.Abort(
1878 _(
1875 _(
1879 b"cannot ask for manifest directory '%s' in a flat "
1876 b"cannot ask for manifest directory '%s' in a flat "
1880 b"manifest"
1877 b"manifest"
1881 )
1878 )
1882 % tree
1879 % tree
1883 )
1880 )
1884 else:
1881 else:
1885 if verify:
1882 if verify:
1886 # Side-effect is LookupError is raised if node doesn't exist.
1883 # Side-effect is LookupError is raised if node doesn't exist.
1887 self._rootstore.rev(node)
1884 self._rootstore.rev(node)
1888
1885
1889 if self._treemanifests:
1886 if self._treemanifests:
1890 m = treemanifestctx(self, b'', node)
1887 m = treemanifestctx(self, b'', node)
1891 else:
1888 else:
1892 m = manifestctx(self, node)
1889 m = manifestctx(self, node)
1893
1890
1894 if node != nullid:
1891 if node != nullid:
1895 mancache = self._dirmancache.get(tree)
1892 mancache = self._dirmancache.get(tree)
1896 if not mancache:
1893 if not mancache:
1897 mancache = util.lrucachedict(self._cachesize)
1894 mancache = util.lrucachedict(self._cachesize)
1898 self._dirmancache[tree] = mancache
1895 self._dirmancache[tree] = mancache
1899 mancache[node] = m
1896 mancache[node] = m
1900 return m
1897 return m
1901
1898
1902 def getstorage(self, tree):
1899 def getstorage(self, tree):
1903 return self._rootstore.dirlog(tree)
1900 return self._rootstore.dirlog(tree)
1904
1901
1905 def clearcaches(self, clear_persisted_data=False):
1902 def clearcaches(self, clear_persisted_data=False):
1906 self._dirmancache.clear()
1903 self._dirmancache.clear()
1907 self._rootstore.clearcaches(clear_persisted_data=clear_persisted_data)
1904 self._rootstore.clearcaches(clear_persisted_data=clear_persisted_data)
1908
1905
1909 def rev(self, node):
1906 def rev(self, node):
1910 return self._rootstore.rev(node)
1907 return self._rootstore.rev(node)
1911
1908
1912
1909
1913 @interfaceutil.implementer(repository.imanifestrevisionwritable)
1910 @interfaceutil.implementer(repository.imanifestrevisionwritable)
1914 class memmanifestctx(object):
1911 class memmanifestctx(object):
1915 def __init__(self, manifestlog):
1912 def __init__(self, manifestlog):
1916 self._manifestlog = manifestlog
1913 self._manifestlog = manifestlog
1917 self._manifestdict = manifestdict()
1914 self._manifestdict = manifestdict()
1918
1915
1919 def _storage(self):
1916 def _storage(self):
1920 return self._manifestlog.getstorage(b'')
1917 return self._manifestlog.getstorage(b'')
1921
1918
1922 def copy(self):
1919 def copy(self):
1923 memmf = memmanifestctx(self._manifestlog)
1920 memmf = memmanifestctx(self._manifestlog)
1924 memmf._manifestdict = self.read().copy()
1921 memmf._manifestdict = self.read().copy()
1925 return memmf
1922 return memmf
1926
1923
1927 def read(self):
1924 def read(self):
1928 return self._manifestdict
1925 return self._manifestdict
1929
1926
1930 def write(self, transaction, link, p1, p2, added, removed, match=None):
1927 def write(self, transaction, link, p1, p2, added, removed, match=None):
1931 return self._storage().add(
1928 return self._storage().add(
1932 self._manifestdict,
1929 self._manifestdict,
1933 transaction,
1930 transaction,
1934 link,
1931 link,
1935 p1,
1932 p1,
1936 p2,
1933 p2,
1937 added,
1934 added,
1938 removed,
1935 removed,
1939 match=match,
1936 match=match,
1940 )
1937 )
1941
1938
1942
1939
1943 @interfaceutil.implementer(repository.imanifestrevisionstored)
1940 @interfaceutil.implementer(repository.imanifestrevisionstored)
1944 class manifestctx(object):
1941 class manifestctx(object):
1945 """A class representing a single revision of a manifest, including its
1942 """A class representing a single revision of a manifest, including its
1946 contents, its parent revs, and its linkrev.
1943 contents, its parent revs, and its linkrev.
1947 """
1944 """
1948
1945
1949 def __init__(self, manifestlog, node):
1946 def __init__(self, manifestlog, node):
1950 self._manifestlog = manifestlog
1947 self._manifestlog = manifestlog
1951 self._data = None
1948 self._data = None
1952
1949
1953 self._node = node
1950 self._node = node
1954
1951
1955 # TODO: We eventually want p1, p2, and linkrev exposed on this class,
1952 # TODO: We eventually want p1, p2, and linkrev exposed on this class,
1956 # but let's add it later when something needs it and we can load it
1953 # but let's add it later when something needs it and we can load it
1957 # lazily.
1954 # lazily.
1958 # self.p1, self.p2 = store.parents(node)
1955 # self.p1, self.p2 = store.parents(node)
1959 # rev = store.rev(node)
1956 # rev = store.rev(node)
1960 # self.linkrev = store.linkrev(rev)
1957 # self.linkrev = store.linkrev(rev)
1961
1958
1962 def _storage(self):
1959 def _storage(self):
1963 return self._manifestlog.getstorage(b'')
1960 return self._manifestlog.getstorage(b'')
1964
1961
1965 def node(self):
1962 def node(self):
1966 return self._node
1963 return self._node
1967
1964
1968 def copy(self):
1965 def copy(self):
1969 memmf = memmanifestctx(self._manifestlog)
1966 memmf = memmanifestctx(self._manifestlog)
1970 memmf._manifestdict = self.read().copy()
1967 memmf._manifestdict = self.read().copy()
1971 return memmf
1968 return memmf
1972
1969
1973 @propertycache
1970 @propertycache
1974 def parents(self):
1971 def parents(self):
1975 return self._storage().parents(self._node)
1972 return self._storage().parents(self._node)
1976
1973
1977 def read(self):
1974 def read(self):
1978 if self._data is None:
1975 if self._data is None:
1979 if self._node == nullid:
1976 if self._node == nullid:
1980 self._data = manifestdict()
1977 self._data = manifestdict()
1981 else:
1978 else:
1982 store = self._storage()
1979 store = self._storage()
1983 if self._node in store.fulltextcache:
1980 if self._node in store.fulltextcache:
1984 text = pycompat.bytestr(store.fulltextcache[self._node])
1981 text = pycompat.bytestr(store.fulltextcache[self._node])
1985 else:
1982 else:
1986 text = store.revision(self._node)
1983 text = store.revision(self._node)
1987 arraytext = bytearray(text)
1984 arraytext = bytearray(text)
1988 store.fulltextcache[self._node] = arraytext
1985 store.fulltextcache[self._node] = arraytext
1989 self._data = manifestdict(text)
1986 self._data = manifestdict(text)
1990 return self._data
1987 return self._data
1991
1988
1992 def readfast(self, shallow=False):
1989 def readfast(self, shallow=False):
1993 '''Calls either readdelta or read, based on which would be less work.
1990 '''Calls either readdelta or read, based on which would be less work.
1994 readdelta is called if the delta is against the p1, and therefore can be
1991 readdelta is called if the delta is against the p1, and therefore can be
1995 read quickly.
1992 read quickly.
1996
1993
1997 If `shallow` is True, nothing changes since this is a flat manifest.
1994 If `shallow` is True, nothing changes since this is a flat manifest.
1998 '''
1995 '''
1999 store = self._storage()
1996 store = self._storage()
2000 r = store.rev(self._node)
1997 r = store.rev(self._node)
2001 deltaparent = store.deltaparent(r)
1998 deltaparent = store.deltaparent(r)
2002 if deltaparent != nullrev and deltaparent in store.parentrevs(r):
1999 if deltaparent != nullrev and deltaparent in store.parentrevs(r):
2003 return self.readdelta()
2000 return self.readdelta()
2004 return self.read()
2001 return self.read()
2005
2002
2006 def readdelta(self, shallow=False):
2003 def readdelta(self, shallow=False):
2007 '''Returns a manifest containing just the entries that are present
2004 '''Returns a manifest containing just the entries that are present
2008 in this manifest, but not in its p1 manifest. This is efficient to read
2005 in this manifest, but not in its p1 manifest. This is efficient to read
2009 if the revlog delta is already p1.
2006 if the revlog delta is already p1.
2010
2007
2011 Changing the value of `shallow` has no effect on flat manifests.
2008 Changing the value of `shallow` has no effect on flat manifests.
2012 '''
2009 '''
2013 store = self._storage()
2010 store = self._storage()
2014 r = store.rev(self._node)
2011 r = store.rev(self._node)
2015 d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
2012 d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
2016 return manifestdict(d)
2013 return manifestdict(d)
2017
2014
2018 def find(self, key):
2015 def find(self, key):
2019 return self.read().find(key)
2016 return self.read().find(key)
2020
2017
2021
2018
2022 @interfaceutil.implementer(repository.imanifestrevisionwritable)
2019 @interfaceutil.implementer(repository.imanifestrevisionwritable)
2023 class memtreemanifestctx(object):
2020 class memtreemanifestctx(object):
2024 def __init__(self, manifestlog, dir=b''):
2021 def __init__(self, manifestlog, dir=b''):
2025 self._manifestlog = manifestlog
2022 self._manifestlog = manifestlog
2026 self._dir = dir
2023 self._dir = dir
2027 self._treemanifest = treemanifest()
2024 self._treemanifest = treemanifest()
2028
2025
2029 def _storage(self):
2026 def _storage(self):
2030 return self._manifestlog.getstorage(b'')
2027 return self._manifestlog.getstorage(b'')
2031
2028
2032 def copy(self):
2029 def copy(self):
2033 memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
2030 memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
2034 memmf._treemanifest = self._treemanifest.copy()
2031 memmf._treemanifest = self._treemanifest.copy()
2035 return memmf
2032 return memmf
2036
2033
2037 def read(self):
2034 def read(self):
2038 return self._treemanifest
2035 return self._treemanifest
2039
2036
2040 def write(self, transaction, link, p1, p2, added, removed, match=None):
2037 def write(self, transaction, link, p1, p2, added, removed, match=None):
2041 def readtree(dir, node):
2038 def readtree(dir, node):
2042 return self._manifestlog.get(dir, node).read()
2039 return self._manifestlog.get(dir, node).read()
2043
2040
2044 return self._storage().add(
2041 return self._storage().add(
2045 self._treemanifest,
2042 self._treemanifest,
2046 transaction,
2043 transaction,
2047 link,
2044 link,
2048 p1,
2045 p1,
2049 p2,
2046 p2,
2050 added,
2047 added,
2051 removed,
2048 removed,
2052 readtree=readtree,
2049 readtree=readtree,
2053 match=match,
2050 match=match,
2054 )
2051 )
2055
2052
2056
2053
2057 @interfaceutil.implementer(repository.imanifestrevisionstored)
2054 @interfaceutil.implementer(repository.imanifestrevisionstored)
2058 class treemanifestctx(object):
2055 class treemanifestctx(object):
2059 def __init__(self, manifestlog, dir, node):
2056 def __init__(self, manifestlog, dir, node):
2060 self._manifestlog = manifestlog
2057 self._manifestlog = manifestlog
2061 self._dir = dir
2058 self._dir = dir
2062 self._data = None
2059 self._data = None
2063
2060
2064 self._node = node
2061 self._node = node
2065
2062
2066 # TODO: Load p1/p2/linkrev lazily. They need to be lazily loaded so that
2063 # TODO: Load p1/p2/linkrev lazily. They need to be lazily loaded so that
2067 # we can instantiate treemanifestctx objects for directories we don't
2064 # we can instantiate treemanifestctx objects for directories we don't
2068 # have on disk.
2065 # have on disk.
2069 # self.p1, self.p2 = store.parents(node)
2066 # self.p1, self.p2 = store.parents(node)
2070 # rev = store.rev(node)
2067 # rev = store.rev(node)
2071 # self.linkrev = store.linkrev(rev)
2068 # self.linkrev = store.linkrev(rev)
2072
2069
2073 def _storage(self):
2070 def _storage(self):
2074 narrowmatch = self._manifestlog._narrowmatch
2071 narrowmatch = self._manifestlog._narrowmatch
2075 if not narrowmatch.always():
2072 if not narrowmatch.always():
2076 if not narrowmatch.visitdir(self._dir[:-1]):
2073 if not narrowmatch.visitdir(self._dir[:-1]):
2077 return excludedmanifestrevlog(self._dir)
2074 return excludedmanifestrevlog(self._dir)
2078 return self._manifestlog.getstorage(self._dir)
2075 return self._manifestlog.getstorage(self._dir)
2079
2076
2080 def read(self):
2077 def read(self):
2081 if self._data is None:
2078 if self._data is None:
2082 store = self._storage()
2079 store = self._storage()
2083 if self._node == nullid:
2080 if self._node == nullid:
2084 self._data = treemanifest()
2081 self._data = treemanifest()
2085 # TODO accessing non-public API
2082 # TODO accessing non-public API
2086 elif store._treeondisk:
2083 elif store._treeondisk:
2087 m = treemanifest(dir=self._dir)
2084 m = treemanifest(dir=self._dir)
2088
2085
2089 def gettext():
2086 def gettext():
2090 return store.revision(self._node)
2087 return store.revision(self._node)
2091
2088
2092 def readsubtree(dir, subm):
2089 def readsubtree(dir, subm):
2093 # Set verify to False since we need to be able to create
2090 # Set verify to False since we need to be able to create
2094 # subtrees for trees that don't exist on disk.
2091 # subtrees for trees that don't exist on disk.
2095 return self._manifestlog.get(dir, subm, verify=False).read()
2092 return self._manifestlog.get(dir, subm, verify=False).read()
2096
2093
2097 m.read(gettext, readsubtree)
2094 m.read(gettext, readsubtree)
2098 m.setnode(self._node)
2095 m.setnode(self._node)
2099 self._data = m
2096 self._data = m
2100 else:
2097 else:
2101 if self._node in store.fulltextcache:
2098 if self._node in store.fulltextcache:
2102 text = pycompat.bytestr(store.fulltextcache[self._node])
2099 text = pycompat.bytestr(store.fulltextcache[self._node])
2103 else:
2100 else:
2104 text = store.revision(self._node)
2101 text = store.revision(self._node)
2105 arraytext = bytearray(text)
2102 arraytext = bytearray(text)
2106 store.fulltextcache[self._node] = arraytext
2103 store.fulltextcache[self._node] = arraytext
2107 self._data = treemanifest(dir=self._dir, text=text)
2104 self._data = treemanifest(dir=self._dir, text=text)
2108
2105
2109 return self._data
2106 return self._data
2110
2107
2111 def node(self):
2108 def node(self):
2112 return self._node
2109 return self._node
2113
2110
2114 def copy(self):
2111 def copy(self):
2115 memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
2112 memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
2116 memmf._treemanifest = self.read().copy()
2113 memmf._treemanifest = self.read().copy()
2117 return memmf
2114 return memmf
2118
2115
2119 @propertycache
2116 @propertycache
2120 def parents(self):
2117 def parents(self):
2121 return self._storage().parents(self._node)
2118 return self._storage().parents(self._node)
2122
2119
2123 def readdelta(self, shallow=False):
2120 def readdelta(self, shallow=False):
2124 '''Returns a manifest containing just the entries that are present
2121 '''Returns a manifest containing just the entries that are present
2125 in this manifest, but not in its p1 manifest. This is efficient to read
2122 in this manifest, but not in its p1 manifest. This is efficient to read
2126 if the revlog delta is already p1.
2123 if the revlog delta is already p1.
2127
2124
2128 If `shallow` is True, this will read the delta for this directory,
2125 If `shallow` is True, this will read the delta for this directory,
2129 without recursively reading subdirectory manifests. Instead, any
2126 without recursively reading subdirectory manifests. Instead, any
2130 subdirectory entry will be reported as it appears in the manifest, i.e.
2127 subdirectory entry will be reported as it appears in the manifest, i.e.
2131 the subdirectory will be reported among files and distinguished only by
2128 the subdirectory will be reported among files and distinguished only by
2132 its 't' flag.
2129 its 't' flag.
2133 '''
2130 '''
2134 store = self._storage()
2131 store = self._storage()
2135 if shallow:
2132 if shallow:
2136 r = store.rev(self._node)
2133 r = store.rev(self._node)
2137 d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
2134 d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
2138 return manifestdict(d)
2135 return manifestdict(d)
2139 else:
2136 else:
2140 # Need to perform a slow delta
2137 # Need to perform a slow delta
2141 r0 = store.deltaparent(store.rev(self._node))
2138 r0 = store.deltaparent(store.rev(self._node))
2142 m0 = self._manifestlog.get(self._dir, store.node(r0)).read()
2139 m0 = self._manifestlog.get(self._dir, store.node(r0)).read()
2143 m1 = self.read()
2140 m1 = self.read()
2144 md = treemanifest(dir=self._dir)
2141 md = treemanifest(dir=self._dir)
2145 for f, ((n0, fl0), (n1, fl1)) in pycompat.iteritems(m0.diff(m1)):
2142 for f, ((n0, fl0), (n1, fl1)) in pycompat.iteritems(m0.diff(m1)):
2146 if n1:
2143 if n1:
2147 md[f] = n1
2144 md[f] = n1
2148 if fl1:
2145 if fl1:
2149 md.setflag(f, fl1)
2146 md.setflag(f, fl1)
2150 return md
2147 return md
2151
2148
2152 def readfast(self, shallow=False):
2149 def readfast(self, shallow=False):
2153 '''Calls either readdelta or read, based on which would be less work.
2150 '''Calls either readdelta or read, based on which would be less work.
2154 readdelta is called if the delta is against the p1, and therefore can be
2151 readdelta is called if the delta is against the p1, and therefore can be
2155 read quickly.
2152 read quickly.
2156
2153
2157 If `shallow` is True, it only returns the entries from this manifest,
2154 If `shallow` is True, it only returns the entries from this manifest,
2158 and not any submanifests.
2155 and not any submanifests.
2159 '''
2156 '''
2160 store = self._storage()
2157 store = self._storage()
2161 r = store.rev(self._node)
2158 r = store.rev(self._node)
2162 deltaparent = store.deltaparent(r)
2159 deltaparent = store.deltaparent(r)
2163 if deltaparent != nullrev and deltaparent in store.parentrevs(r):
2160 if deltaparent != nullrev and deltaparent in store.parentrevs(r):
2164 return self.readdelta(shallow=shallow)
2161 return self.readdelta(shallow=shallow)
2165
2162
2166 if shallow:
2163 if shallow:
2167 return manifestdict(store.revision(self._node))
2164 return manifestdict(store.revision(self._node))
2168 else:
2165 else:
2169 return self.read()
2166 return self.read()
2170
2167
2171 def find(self, key):
2168 def find(self, key):
2172 return self.read().find(key)
2169 return self.read().find(key)
2173
2170
2174
2171
2175 class excludeddir(treemanifest):
2172 class excludeddir(treemanifest):
2176 """Stand-in for a directory that is excluded from the repository.
2173 """Stand-in for a directory that is excluded from the repository.
2177
2174
2178 With narrowing active on a repository that uses treemanifests,
2175 With narrowing active on a repository that uses treemanifests,
2179 some of the directory revlogs will be excluded from the resulting
2176 some of the directory revlogs will be excluded from the resulting
2180 clone. This is a huge storage win for clients, but means we need
2177 clone. This is a huge storage win for clients, but means we need
2181 some sort of pseudo-manifest to surface to internals so we can
2178 some sort of pseudo-manifest to surface to internals so we can
2182 detect a merge conflict outside the narrowspec. That's what this
2179 detect a merge conflict outside the narrowspec. That's what this
2183 class is: it stands in for a directory whose node is known, but
2180 class is: it stands in for a directory whose node is known, but
2184 whose contents are unknown.
2181 whose contents are unknown.
2185 """
2182 """
2186
2183
2187 def __init__(self, dir, node):
2184 def __init__(self, dir, node):
2188 super(excludeddir, self).__init__(dir)
2185 super(excludeddir, self).__init__(dir)
2189 self._node = node
2186 self._node = node
2190 # Add an empty file, which will be included by iterators and such,
2187 # Add an empty file, which will be included by iterators and such,
2191 # appearing as the directory itself (i.e. something like "dir/")
2188 # appearing as the directory itself (i.e. something like "dir/")
2192 self._files[b''] = node
2189 self._files[b''] = node
2193 self._flags[b''] = b't'
2190 self._flags[b''] = b't'
2194
2191
2195 # Manifests outside the narrowspec should never be modified, so avoid
2192 # Manifests outside the narrowspec should never be modified, so avoid
2196 # copying. This makes a noticeable difference when there are very many
2193 # copying. This makes a noticeable difference when there are very many
2197 # directories outside the narrowspec. Also, it makes sense for the copy to
2194 # directories outside the narrowspec. Also, it makes sense for the copy to
2198 # be of the same type as the original, which would not happen with the
2195 # be of the same type as the original, which would not happen with the
2199 # super type's copy().
2196 # super type's copy().
2200 def copy(self):
2197 def copy(self):
2201 return self
2198 return self
2202
2199
2203
2200
2204 class excludeddirmanifestctx(treemanifestctx):
2201 class excludeddirmanifestctx(treemanifestctx):
2205 """context wrapper for excludeddir - see that docstring for rationale"""
2202 """context wrapper for excludeddir - see that docstring for rationale"""
2206
2203
2207 def __init__(self, dir, node):
2204 def __init__(self, dir, node):
2208 self._dir = dir
2205 self._dir = dir
2209 self._node = node
2206 self._node = node
2210
2207
2211 def read(self):
2208 def read(self):
2212 return excludeddir(self._dir, self._node)
2209 return excludeddir(self._dir, self._node)
2213
2210
2214 def write(self, *args):
2211 def write(self, *args):
2215 raise error.ProgrammingError(
2212 raise error.ProgrammingError(
2216 b'attempt to write manifest from excluded dir %s' % self._dir
2213 b'attempt to write manifest from excluded dir %s' % self._dir
2217 )
2214 )
2218
2215
2219
2216
2220 class excludedmanifestrevlog(manifestrevlog):
2217 class excludedmanifestrevlog(manifestrevlog):
2221 """Stand-in for excluded treemanifest revlogs.
2218 """Stand-in for excluded treemanifest revlogs.
2222
2219
2223 When narrowing is active on a treemanifest repository, we'll have
2220 When narrowing is active on a treemanifest repository, we'll have
2224 references to directories we can't see due to the revlog being
2221 references to directories we can't see due to the revlog being
2225 skipped. This class exists to conform to the manifestrevlog
2222 skipped. This class exists to conform to the manifestrevlog
2226 interface for those directories and proactively prevent writes to
2223 interface for those directories and proactively prevent writes to
2227 outside the narrowspec.
2224 outside the narrowspec.
2228 """
2225 """
2229
2226
2230 def __init__(self, dir):
2227 def __init__(self, dir):
2231 self._dir = dir
2228 self._dir = dir
2232
2229
2233 def __len__(self):
2230 def __len__(self):
2234 raise error.ProgrammingError(
2231 raise error.ProgrammingError(
2235 b'attempt to get length of excluded dir %s' % self._dir
2232 b'attempt to get length of excluded dir %s' % self._dir
2236 )
2233 )
2237
2234
2238 def rev(self, node):
2235 def rev(self, node):
2239 raise error.ProgrammingError(
2236 raise error.ProgrammingError(
2240 b'attempt to get rev from excluded dir %s' % self._dir
2237 b'attempt to get rev from excluded dir %s' % self._dir
2241 )
2238 )
2242
2239
2243 def linkrev(self, node):
2240 def linkrev(self, node):
2244 raise error.ProgrammingError(
2241 raise error.ProgrammingError(
2245 b'attempt to get linkrev from excluded dir %s' % self._dir
2242 b'attempt to get linkrev from excluded dir %s' % self._dir
2246 )
2243 )
2247
2244
2248 def node(self, rev):
2245 def node(self, rev):
2249 raise error.ProgrammingError(
2246 raise error.ProgrammingError(
2250 b'attempt to get node from excluded dir %s' % self._dir
2247 b'attempt to get node from excluded dir %s' % self._dir
2251 )
2248 )
2252
2249
2253 def add(self, *args, **kwargs):
2250 def add(self, *args, **kwargs):
2254 # We should never write entries in dirlogs outside the narrow clone.
2251 # We should never write entries in dirlogs outside the narrow clone.
2255 # However, the method still gets called from writesubtree() in
2252 # However, the method still gets called from writesubtree() in
2256 # _addtree(), so we need to handle it. We should possibly make that
2253 # _addtree(), so we need to handle it. We should possibly make that
2257 # avoid calling add() with a clean manifest (_dirty is always False
2254 # avoid calling add() with a clean manifest (_dirty is always False
2258 # in excludeddir instances).
2255 # in excludeddir instances).
2259 pass
2256 pass
@@ -1,483 +1,483 b''
1 from __future__ import absolute_import
1 from __future__ import absolute_import
2
2
3 import binascii
3 import binascii
4 import itertools
4 import itertools
5 import silenttestrunner
5 import silenttestrunner
6 import unittest
6 import unittest
7 import zlib
7 import zlib
8
8
9 from mercurial import (
9 from mercurial import (
10 manifest as manifestmod,
10 manifest as manifestmod,
11 match as matchmod,
11 match as matchmod,
12 util,
12 util,
13 )
13 )
14
14
15 EMTPY_MANIFEST = b''
15 EMTPY_MANIFEST = b''
16
16
17 HASH_1 = b'1' * 40
17 HASH_1 = b'1' * 40
18 BIN_HASH_1 = binascii.unhexlify(HASH_1)
18 BIN_HASH_1 = binascii.unhexlify(HASH_1)
19 HASH_2 = b'f' * 40
19 HASH_2 = b'f' * 40
20 BIN_HASH_2 = binascii.unhexlify(HASH_2)
20 BIN_HASH_2 = binascii.unhexlify(HASH_2)
21 HASH_3 = b'1234567890abcdef0987654321deadbeef0fcafe'
21 HASH_3 = b'1234567890abcdef0987654321deadbeef0fcafe'
22 BIN_HASH_3 = binascii.unhexlify(HASH_3)
22 BIN_HASH_3 = binascii.unhexlify(HASH_3)
23 A_SHORT_MANIFEST = (
23 A_SHORT_MANIFEST = (
24 b'bar/baz/qux.py\0%(hash2)s%(flag2)s\n' b'foo\0%(hash1)s%(flag1)s\n'
24 b'bar/baz/qux.py\0%(hash2)s%(flag2)s\n' b'foo\0%(hash1)s%(flag1)s\n'
25 ) % {b'hash1': HASH_1, b'flag1': b'', b'hash2': HASH_2, b'flag2': b'l',}
25 ) % {b'hash1': HASH_1, b'flag1': b'', b'hash2': HASH_2, b'flag2': b'l',}
26
26
27 A_DEEPER_MANIFEST = (
27 A_DEEPER_MANIFEST = (
28 b'a/b/c/bar.py\0%(hash3)s%(flag1)s\n'
28 b'a/b/c/bar.py\0%(hash3)s%(flag1)s\n'
29 b'a/b/c/bar.txt\0%(hash1)s%(flag1)s\n'
29 b'a/b/c/bar.txt\0%(hash1)s%(flag1)s\n'
30 b'a/b/c/foo.py\0%(hash3)s%(flag1)s\n'
30 b'a/b/c/foo.py\0%(hash3)s%(flag1)s\n'
31 b'a/b/c/foo.txt\0%(hash2)s%(flag2)s\n'
31 b'a/b/c/foo.txt\0%(hash2)s%(flag2)s\n'
32 b'a/b/d/baz.py\0%(hash3)s%(flag1)s\n'
32 b'a/b/d/baz.py\0%(hash3)s%(flag1)s\n'
33 b'a/b/d/qux.py\0%(hash1)s%(flag2)s\n'
33 b'a/b/d/qux.py\0%(hash1)s%(flag2)s\n'
34 b'a/b/d/ten.txt\0%(hash3)s%(flag2)s\n'
34 b'a/b/d/ten.txt\0%(hash3)s%(flag2)s\n'
35 b'a/b/dog.py\0%(hash3)s%(flag1)s\n'
35 b'a/b/dog.py\0%(hash3)s%(flag1)s\n'
36 b'a/b/fish.py\0%(hash2)s%(flag1)s\n'
36 b'a/b/fish.py\0%(hash2)s%(flag1)s\n'
37 b'a/c/london.py\0%(hash3)s%(flag2)s\n'
37 b'a/c/london.py\0%(hash3)s%(flag2)s\n'
38 b'a/c/paper.txt\0%(hash2)s%(flag2)s\n'
38 b'a/c/paper.txt\0%(hash2)s%(flag2)s\n'
39 b'a/c/paris.py\0%(hash2)s%(flag1)s\n'
39 b'a/c/paris.py\0%(hash2)s%(flag1)s\n'
40 b'a/d/apple.py\0%(hash3)s%(flag1)s\n'
40 b'a/d/apple.py\0%(hash3)s%(flag1)s\n'
41 b'a/d/pizza.py\0%(hash3)s%(flag2)s\n'
41 b'a/d/pizza.py\0%(hash3)s%(flag2)s\n'
42 b'a/green.py\0%(hash1)s%(flag2)s\n'
42 b'a/green.py\0%(hash1)s%(flag2)s\n'
43 b'a/purple.py\0%(hash2)s%(flag1)s\n'
43 b'a/purple.py\0%(hash2)s%(flag1)s\n'
44 b'app.py\0%(hash3)s%(flag1)s\n'
44 b'app.py\0%(hash3)s%(flag1)s\n'
45 b'readme.txt\0%(hash2)s%(flag1)s\n'
45 b'readme.txt\0%(hash2)s%(flag1)s\n'
46 ) % {
46 ) % {
47 b'hash1': HASH_1,
47 b'hash1': HASH_1,
48 b'flag1': b'',
48 b'flag1': b'',
49 b'hash2': HASH_2,
49 b'hash2': HASH_2,
50 b'flag2': b'l',
50 b'flag2': b'l',
51 b'hash3': HASH_3,
51 b'hash3': HASH_3,
52 }
52 }
53
53
54 HUGE_MANIFEST_ENTRIES = 200001
54 HUGE_MANIFEST_ENTRIES = 200001
55
55
56 izip = getattr(itertools, 'izip', zip)
56 izip = getattr(itertools, 'izip', zip)
57 if 'xrange' not in globals():
57 if 'xrange' not in globals():
58 xrange = range
58 xrange = range
59
59
60 A_HUGE_MANIFEST = b''.join(
60 A_HUGE_MANIFEST = b''.join(
61 sorted(
61 sorted(
62 b'file%d\0%s%s\n' % (i, h, f)
62 b'file%d\0%s%s\n' % (i, h, f)
63 for i, h, f in izip(
63 for i, h, f in izip(
64 xrange(200001),
64 xrange(200001),
65 itertools.cycle((HASH_1, HASH_2)),
65 itertools.cycle((HASH_1, HASH_2)),
66 itertools.cycle((b'', b'x', b'l')),
66 itertools.cycle((b'', b'x', b'l')),
67 )
67 )
68 )
68 )
69 )
69 )
70
70
71
71
72 class basemanifesttests(object):
72 class basemanifesttests(object):
73 def parsemanifest(self, text):
73 def parsemanifest(self, text):
74 raise NotImplementedError('parsemanifest not implemented by test case')
74 raise NotImplementedError('parsemanifest not implemented by test case')
75
75
76 def testEmptyManifest(self):
76 def testEmptyManifest(self):
77 m = self.parsemanifest(EMTPY_MANIFEST)
77 m = self.parsemanifest(EMTPY_MANIFEST)
78 self.assertEqual(0, len(m))
78 self.assertEqual(0, len(m))
79 self.assertEqual([], list(m))
79 self.assertEqual([], list(m))
80
80
81 def testManifest(self):
81 def testManifest(self):
82 m = self.parsemanifest(A_SHORT_MANIFEST)
82 m = self.parsemanifest(A_SHORT_MANIFEST)
83 self.assertEqual([b'bar/baz/qux.py', b'foo'], list(m))
83 self.assertEqual([b'bar/baz/qux.py', b'foo'], list(m))
84 self.assertEqual(BIN_HASH_2, m[b'bar/baz/qux.py'])
84 self.assertEqual(BIN_HASH_2, m[b'bar/baz/qux.py'])
85 self.assertEqual(b'l', m.flags(b'bar/baz/qux.py'))
85 self.assertEqual(b'l', m.flags(b'bar/baz/qux.py'))
86 self.assertEqual(BIN_HASH_1, m[b'foo'])
86 self.assertEqual(BIN_HASH_1, m[b'foo'])
87 self.assertEqual(b'', m.flags(b'foo'))
87 self.assertEqual(b'', m.flags(b'foo'))
88 with self.assertRaises(KeyError):
88 with self.assertRaises(KeyError):
89 m[b'wat']
89 m[b'wat']
90
90
91 def testSetItem(self):
91 def testSetItem(self):
92 want = BIN_HASH_1
92 want = BIN_HASH_1
93
93
94 m = self.parsemanifest(EMTPY_MANIFEST)
94 m = self.parsemanifest(EMTPY_MANIFEST)
95 m[b'a'] = want
95 m[b'a'] = want
96 self.assertIn(b'a', m)
96 self.assertIn(b'a', m)
97 self.assertEqual(want, m[b'a'])
97 self.assertEqual(want, m[b'a'])
98 self.assertEqual(b'a\0' + HASH_1 + b'\n', m.text())
98 self.assertEqual(b'a\0' + HASH_1 + b'\n', m.text())
99
99
100 m = self.parsemanifest(A_SHORT_MANIFEST)
100 m = self.parsemanifest(A_SHORT_MANIFEST)
101 m[b'a'] = want
101 m[b'a'] = want
102 self.assertEqual(want, m[b'a'])
102 self.assertEqual(want, m[b'a'])
103 self.assertEqual(b'a\0' + HASH_1 + b'\n' + A_SHORT_MANIFEST, m.text())
103 self.assertEqual(b'a\0' + HASH_1 + b'\n' + A_SHORT_MANIFEST, m.text())
104
104
105 def testSetFlag(self):
105 def testSetFlag(self):
106 want = b'x'
106 want = b'x'
107
107
108 m = self.parsemanifest(EMTPY_MANIFEST)
108 m = self.parsemanifest(EMTPY_MANIFEST)
109 # first add a file; a file-less flag makes no sense
109 # first add a file; a file-less flag makes no sense
110 m[b'a'] = BIN_HASH_1
110 m[b'a'] = BIN_HASH_1
111 m.setflag(b'a', want)
111 m.setflag(b'a', want)
112 self.assertEqual(want, m.flags(b'a'))
112 self.assertEqual(want, m.flags(b'a'))
113 self.assertEqual(b'a\0' + HASH_1 + want + b'\n', m.text())
113 self.assertEqual(b'a\0' + HASH_1 + want + b'\n', m.text())
114
114
115 m = self.parsemanifest(A_SHORT_MANIFEST)
115 m = self.parsemanifest(A_SHORT_MANIFEST)
116 # first add a file; a file-less flag makes no sense
116 # first add a file; a file-less flag makes no sense
117 m[b'a'] = BIN_HASH_1
117 m[b'a'] = BIN_HASH_1
118 m.setflag(b'a', want)
118 m.setflag(b'a', want)
119 self.assertEqual(want, m.flags(b'a'))
119 self.assertEqual(want, m.flags(b'a'))
120 self.assertEqual(
120 self.assertEqual(
121 b'a\0' + HASH_1 + want + b'\n' + A_SHORT_MANIFEST, m.text()
121 b'a\0' + HASH_1 + want + b'\n' + A_SHORT_MANIFEST, m.text()
122 )
122 )
123
123
124 def testCopy(self):
124 def testCopy(self):
125 m = self.parsemanifest(A_SHORT_MANIFEST)
125 m = self.parsemanifest(A_SHORT_MANIFEST)
126 m[b'a'] = BIN_HASH_1
126 m[b'a'] = BIN_HASH_1
127 m2 = m.copy()
127 m2 = m.copy()
128 del m
128 del m
129 del m2 # make sure we don't double free() anything
129 del m2 # make sure we don't double free() anything
130
130
131 def testCompaction(self):
131 def testCompaction(self):
132 unhex = binascii.unhexlify
132 unhex = binascii.unhexlify
133 h1, h2 = unhex(HASH_1), unhex(HASH_2)
133 h1, h2 = unhex(HASH_1), unhex(HASH_2)
134 m = self.parsemanifest(A_SHORT_MANIFEST)
134 m = self.parsemanifest(A_SHORT_MANIFEST)
135 m[b'alpha'] = h1
135 m[b'alpha'] = h1
136 m[b'beta'] = h2
136 m[b'beta'] = h2
137 del m[b'foo']
137 del m[b'foo']
138 want = b'alpha\0%s\nbar/baz/qux.py\0%sl\nbeta\0%s\n' % (
138 want = b'alpha\0%s\nbar/baz/qux.py\0%sl\nbeta\0%s\n' % (
139 HASH_1,
139 HASH_1,
140 HASH_2,
140 HASH_2,
141 HASH_2,
141 HASH_2,
142 )
142 )
143 self.assertEqual(want, m.text())
143 self.assertEqual(want, m.text())
144 self.assertEqual(3, len(m))
144 self.assertEqual(3, len(m))
145 self.assertEqual([b'alpha', b'bar/baz/qux.py', b'beta'], list(m))
145 self.assertEqual([b'alpha', b'bar/baz/qux.py', b'beta'], list(m))
146 self.assertEqual(h1, m[b'alpha'])
146 self.assertEqual(h1, m[b'alpha'])
147 self.assertEqual(h2, m[b'bar/baz/qux.py'])
147 self.assertEqual(h2, m[b'bar/baz/qux.py'])
148 self.assertEqual(h2, m[b'beta'])
148 self.assertEqual(h2, m[b'beta'])
149 self.assertEqual(b'', m.flags(b'alpha'))
149 self.assertEqual(b'', m.flags(b'alpha'))
150 self.assertEqual(b'l', m.flags(b'bar/baz/qux.py'))
150 self.assertEqual(b'l', m.flags(b'bar/baz/qux.py'))
151 self.assertEqual(b'', m.flags(b'beta'))
151 self.assertEqual(b'', m.flags(b'beta'))
152 with self.assertRaises(KeyError):
152 with self.assertRaises(KeyError):
153 m[b'foo']
153 m[b'foo']
154
154
155 def testSetGetNodeSuffix(self):
155 def testSetGetNodeSuffix(self):
156 clean = self.parsemanifest(A_SHORT_MANIFEST)
156 clean = self.parsemanifest(A_SHORT_MANIFEST)
157 m = self.parsemanifest(A_SHORT_MANIFEST)
157 m = self.parsemanifest(A_SHORT_MANIFEST)
158 h = m[b'foo']
158 h = m[b'foo']
159 f = m.flags(b'foo')
159 f = m.flags(b'foo')
160 want = h + b'a'
160 want = h + b'a'
161 # Merge code wants to set 21-byte fake hashes at times
161 # Merge code wants to set 21-byte fake hashes at times
162 m[b'foo'] = want
162 m[b'foo'] = want
163 self.assertEqual(want, m[b'foo'])
163 self.assertEqual(want, m[b'foo'])
164 self.assertEqual(
164 self.assertEqual(
165 [(b'bar/baz/qux.py', BIN_HASH_2), (b'foo', BIN_HASH_1 + b'a')],
165 [(b'bar/baz/qux.py', BIN_HASH_2), (b'foo', BIN_HASH_1 + b'a')],
166 list(m.items()),
166 list(m.items()),
167 )
167 )
168 # Sometimes it even tries a 22-byte fake hash, but we can
168 # Sometimes it even tries a 22-byte fake hash, but we can
169 # return 21 and it'll work out
169 # return 21 and it'll work out
170 m[b'foo'] = want + b'+'
170 m[b'foo'] = want + b'+'
171 self.assertEqual(want, m[b'foo'])
171 self.assertEqual(want, m[b'foo'])
172 # make sure the suffix survives a copy
172 # make sure the suffix survives a copy
173 match = matchmod.match(util.localpath(b'/repo'), b'', [b're:foo'])
173 match = matchmod.match(util.localpath(b'/repo'), b'', [b're:foo'])
174 m2 = m.matches(match)
174 m2 = m._matches(match)
175 self.assertEqual(want, m2[b'foo'])
175 self.assertEqual(want, m2[b'foo'])
176 self.assertEqual(1, len(m2))
176 self.assertEqual(1, len(m2))
177 m2 = m.copy()
177 m2 = m.copy()
178 self.assertEqual(want, m2[b'foo'])
178 self.assertEqual(want, m2[b'foo'])
179 # suffix with iteration
179 # suffix with iteration
180 self.assertEqual(
180 self.assertEqual(
181 [(b'bar/baz/qux.py', BIN_HASH_2), (b'foo', want)], list(m.items())
181 [(b'bar/baz/qux.py', BIN_HASH_2), (b'foo', want)], list(m.items())
182 )
182 )
183
183
184 # shows up in diff
184 # shows up in diff
185 self.assertEqual({b'foo': ((want, f), (h, b''))}, m.diff(clean))
185 self.assertEqual({b'foo': ((want, f), (h, b''))}, m.diff(clean))
186 self.assertEqual({b'foo': ((h, b''), (want, f))}, clean.diff(m))
186 self.assertEqual({b'foo': ((h, b''), (want, f))}, clean.diff(m))
187
187
188 def testMatchException(self):
188 def testMatchException(self):
189 m = self.parsemanifest(A_SHORT_MANIFEST)
189 m = self.parsemanifest(A_SHORT_MANIFEST)
190 match = matchmod.match(util.localpath(b'/repo'), b'', [b're:.*'])
190 match = matchmod.match(util.localpath(b'/repo'), b'', [b're:.*'])
191
191
192 def filt(path):
192 def filt(path):
193 if path == b'foo':
193 if path == b'foo':
194 assert False
194 assert False
195 return True
195 return True
196
196
197 match.matchfn = filt
197 match.matchfn = filt
198 with self.assertRaises(AssertionError):
198 with self.assertRaises(AssertionError):
199 m.matches(match)
199 m._matches(match)
200
200
201 def testRemoveItem(self):
201 def testRemoveItem(self):
202 m = self.parsemanifest(A_SHORT_MANIFEST)
202 m = self.parsemanifest(A_SHORT_MANIFEST)
203 del m[b'foo']
203 del m[b'foo']
204 with self.assertRaises(KeyError):
204 with self.assertRaises(KeyError):
205 m[b'foo']
205 m[b'foo']
206 self.assertEqual(1, len(m))
206 self.assertEqual(1, len(m))
207 self.assertEqual(1, len(list(m)))
207 self.assertEqual(1, len(list(m)))
208 # now restore and make sure everything works right
208 # now restore and make sure everything works right
209 m[b'foo'] = b'a' * 20
209 m[b'foo'] = b'a' * 20
210 self.assertEqual(2, len(m))
210 self.assertEqual(2, len(m))
211 self.assertEqual(2, len(list(m)))
211 self.assertEqual(2, len(list(m)))
212
212
213 def testManifestDiff(self):
213 def testManifestDiff(self):
214 MISSING = (None, b'')
214 MISSING = (None, b'')
215 addl = b'z-only-in-left\0' + HASH_1 + b'\n'
215 addl = b'z-only-in-left\0' + HASH_1 + b'\n'
216 addr = b'z-only-in-right\0' + HASH_2 + b'x\n'
216 addr = b'z-only-in-right\0' + HASH_2 + b'x\n'
217 left = self.parsemanifest(
217 left = self.parsemanifest(
218 A_SHORT_MANIFEST.replace(HASH_1, HASH_3 + b'x') + addl
218 A_SHORT_MANIFEST.replace(HASH_1, HASH_3 + b'x') + addl
219 )
219 )
220 right = self.parsemanifest(A_SHORT_MANIFEST + addr)
220 right = self.parsemanifest(A_SHORT_MANIFEST + addr)
221 want = {
221 want = {
222 b'foo': ((BIN_HASH_3, b'x'), (BIN_HASH_1, b'')),
222 b'foo': ((BIN_HASH_3, b'x'), (BIN_HASH_1, b'')),
223 b'z-only-in-left': ((BIN_HASH_1, b''), MISSING),
223 b'z-only-in-left': ((BIN_HASH_1, b''), MISSING),
224 b'z-only-in-right': (MISSING, (BIN_HASH_2, b'x')),
224 b'z-only-in-right': (MISSING, (BIN_HASH_2, b'x')),
225 }
225 }
226 self.assertEqual(want, left.diff(right))
226 self.assertEqual(want, left.diff(right))
227
227
228 want = {
228 want = {
229 b'bar/baz/qux.py': (MISSING, (BIN_HASH_2, b'l')),
229 b'bar/baz/qux.py': (MISSING, (BIN_HASH_2, b'l')),
230 b'foo': (MISSING, (BIN_HASH_3, b'x')),
230 b'foo': (MISSING, (BIN_HASH_3, b'x')),
231 b'z-only-in-left': (MISSING, (BIN_HASH_1, b'')),
231 b'z-only-in-left': (MISSING, (BIN_HASH_1, b'')),
232 }
232 }
233 self.assertEqual(want, self.parsemanifest(EMTPY_MANIFEST).diff(left))
233 self.assertEqual(want, self.parsemanifest(EMTPY_MANIFEST).diff(left))
234
234
235 want = {
235 want = {
236 b'bar/baz/qux.py': ((BIN_HASH_2, b'l'), MISSING),
236 b'bar/baz/qux.py': ((BIN_HASH_2, b'l'), MISSING),
237 b'foo': ((BIN_HASH_3, b'x'), MISSING),
237 b'foo': ((BIN_HASH_3, b'x'), MISSING),
238 b'z-only-in-left': ((BIN_HASH_1, b''), MISSING),
238 b'z-only-in-left': ((BIN_HASH_1, b''), MISSING),
239 }
239 }
240 self.assertEqual(want, left.diff(self.parsemanifest(EMTPY_MANIFEST)))
240 self.assertEqual(want, left.diff(self.parsemanifest(EMTPY_MANIFEST)))
241 copy = right.copy()
241 copy = right.copy()
242 del copy[b'z-only-in-right']
242 del copy[b'z-only-in-right']
243 del right[b'foo']
243 del right[b'foo']
244 want = {
244 want = {
245 b'foo': (MISSING, (BIN_HASH_1, b'')),
245 b'foo': (MISSING, (BIN_HASH_1, b'')),
246 b'z-only-in-right': ((BIN_HASH_2, b'x'), MISSING),
246 b'z-only-in-right': ((BIN_HASH_2, b'x'), MISSING),
247 }
247 }
248 self.assertEqual(want, right.diff(copy))
248 self.assertEqual(want, right.diff(copy))
249
249
250 short = self.parsemanifest(A_SHORT_MANIFEST)
250 short = self.parsemanifest(A_SHORT_MANIFEST)
251 pruned = short.copy()
251 pruned = short.copy()
252 del pruned[b'foo']
252 del pruned[b'foo']
253 want = {
253 want = {
254 b'foo': ((BIN_HASH_1, b''), MISSING),
254 b'foo': ((BIN_HASH_1, b''), MISSING),
255 }
255 }
256 self.assertEqual(want, short.diff(pruned))
256 self.assertEqual(want, short.diff(pruned))
257 want = {
257 want = {
258 b'foo': (MISSING, (BIN_HASH_1, b'')),
258 b'foo': (MISSING, (BIN_HASH_1, b'')),
259 }
259 }
260 self.assertEqual(want, pruned.diff(short))
260 self.assertEqual(want, pruned.diff(short))
261 want = {
261 want = {
262 b'bar/baz/qux.py': None,
262 b'bar/baz/qux.py': None,
263 b'foo': (MISSING, (BIN_HASH_1, b'')),
263 b'foo': (MISSING, (BIN_HASH_1, b'')),
264 }
264 }
265 self.assertEqual(want, pruned.diff(short, clean=True))
265 self.assertEqual(want, pruned.diff(short, clean=True))
266
266
267 def testReversedLines(self):
267 def testReversedLines(self):
268 backwards = b''.join(
268 backwards = b''.join(
269 l + b'\n' for l in reversed(A_SHORT_MANIFEST.split(b'\n')) if l
269 l + b'\n' for l in reversed(A_SHORT_MANIFEST.split(b'\n')) if l
270 )
270 )
271 try:
271 try:
272 self.parsemanifest(backwards)
272 self.parsemanifest(backwards)
273 self.fail('Should have raised ValueError')
273 self.fail('Should have raised ValueError')
274 except ValueError as v:
274 except ValueError as v:
275 self.assertIn('Manifest lines not in sorted order.', str(v))
275 self.assertIn('Manifest lines not in sorted order.', str(v))
276
276
277 def testNoTerminalNewline(self):
277 def testNoTerminalNewline(self):
278 try:
278 try:
279 self.parsemanifest(A_SHORT_MANIFEST + b'wat')
279 self.parsemanifest(A_SHORT_MANIFEST + b'wat')
280 self.fail('Should have raised ValueError')
280 self.fail('Should have raised ValueError')
281 except ValueError as v:
281 except ValueError as v:
282 self.assertIn('Manifest did not end in a newline.', str(v))
282 self.assertIn('Manifest did not end in a newline.', str(v))
283
283
284 def testNoNewLineAtAll(self):
284 def testNoNewLineAtAll(self):
285 try:
285 try:
286 self.parsemanifest(b'wat')
286 self.parsemanifest(b'wat')
287 self.fail('Should have raised ValueError')
287 self.fail('Should have raised ValueError')
288 except ValueError as v:
288 except ValueError as v:
289 self.assertIn('Manifest did not end in a newline.', str(v))
289 self.assertIn('Manifest did not end in a newline.', str(v))
290
290
291 def testHugeManifest(self):
291 def testHugeManifest(self):
292 m = self.parsemanifest(A_HUGE_MANIFEST)
292 m = self.parsemanifest(A_HUGE_MANIFEST)
293 self.assertEqual(HUGE_MANIFEST_ENTRIES, len(m))
293 self.assertEqual(HUGE_MANIFEST_ENTRIES, len(m))
294 self.assertEqual(len(m), len(list(m)))
294 self.assertEqual(len(m), len(list(m)))
295
295
296 def testMatchesMetadata(self):
296 def testMatchesMetadata(self):
297 '''Tests matches() for a few specific files to make sure that both
297 '''Tests matches() for a few specific files to make sure that both
298 the set of files as well as their flags and nodeids are correct in
298 the set of files as well as their flags and nodeids are correct in
299 the resulting manifest.'''
299 the resulting manifest.'''
300 m = self.parsemanifest(A_HUGE_MANIFEST)
300 m = self.parsemanifest(A_HUGE_MANIFEST)
301
301
302 match = matchmod.exact([b'file1', b'file200', b'file300'])
302 match = matchmod.exact([b'file1', b'file200', b'file300'])
303 m2 = m.matches(match)
303 m2 = m._matches(match)
304
304
305 w = (b'file1\0%sx\n' b'file200\0%sl\n' b'file300\0%s\n') % (
305 w = (b'file1\0%sx\n' b'file200\0%sl\n' b'file300\0%s\n') % (
306 HASH_2,
306 HASH_2,
307 HASH_1,
307 HASH_1,
308 HASH_1,
308 HASH_1,
309 )
309 )
310 self.assertEqual(w, m2.text())
310 self.assertEqual(w, m2.text())
311
311
312 def testMatchesNonexistentFile(self):
312 def testMatchesNonexistentFile(self):
313 '''Tests matches() for a small set of specific files, including one
313 '''Tests matches() for a small set of specific files, including one
314 nonexistent file to make sure in only matches against existing files.
314 nonexistent file to make sure in only matches against existing files.
315 '''
315 '''
316 m = self.parsemanifest(A_DEEPER_MANIFEST)
316 m = self.parsemanifest(A_DEEPER_MANIFEST)
317
317
318 match = matchmod.exact(
318 match = matchmod.exact(
319 [b'a/b/c/bar.txt', b'a/b/d/qux.py', b'readme.txt', b'nonexistent']
319 [b'a/b/c/bar.txt', b'a/b/d/qux.py', b'readme.txt', b'nonexistent']
320 )
320 )
321 m2 = m.matches(match)
321 m2 = m._matches(match)
322
322
323 self.assertEqual(
323 self.assertEqual(
324 [b'a/b/c/bar.txt', b'a/b/d/qux.py', b'readme.txt'], m2.keys()
324 [b'a/b/c/bar.txt', b'a/b/d/qux.py', b'readme.txt'], m2.keys()
325 )
325 )
326
326
327 def testMatchesNonexistentDirectory(self):
327 def testMatchesNonexistentDirectory(self):
328 '''Tests matches() for a relpath match on a directory that doesn't
328 '''Tests matches() for a relpath match on a directory that doesn't
329 actually exist.'''
329 actually exist.'''
330 m = self.parsemanifest(A_DEEPER_MANIFEST)
330 m = self.parsemanifest(A_DEEPER_MANIFEST)
331
331
332 match = matchmod.match(
332 match = matchmod.match(
333 util.localpath(b'/repo'), b'', [b'a/f'], default=b'relpath'
333 util.localpath(b'/repo'), b'', [b'a/f'], default=b'relpath'
334 )
334 )
335 m2 = m.matches(match)
335 m2 = m._matches(match)
336
336
337 self.assertEqual([], m2.keys())
337 self.assertEqual([], m2.keys())
338
338
339 def testMatchesExactLarge(self):
339 def testMatchesExactLarge(self):
340 '''Tests matches() for files matching a large list of exact files.
340 '''Tests matches() for files matching a large list of exact files.
341 '''
341 '''
342 m = self.parsemanifest(A_HUGE_MANIFEST)
342 m = self.parsemanifest(A_HUGE_MANIFEST)
343
343
344 flist = m.keys()[80:300]
344 flist = m.keys()[80:300]
345 match = matchmod.exact(flist)
345 match = matchmod.exact(flist)
346 m2 = m.matches(match)
346 m2 = m._matches(match)
347
347
348 self.assertEqual(flist, m2.keys())
348 self.assertEqual(flist, m2.keys())
349
349
350 def testMatchesFull(self):
350 def testMatchesFull(self):
351 '''Tests matches() for what should be a full match.'''
351 '''Tests matches() for what should be a full match.'''
352 m = self.parsemanifest(A_DEEPER_MANIFEST)
352 m = self.parsemanifest(A_DEEPER_MANIFEST)
353
353
354 match = matchmod.match(util.localpath(b'/repo'), b'', [b''])
354 match = matchmod.match(util.localpath(b'/repo'), b'', [b''])
355 m2 = m.matches(match)
355 m2 = m._matches(match)
356
356
357 self.assertEqual(m.keys(), m2.keys())
357 self.assertEqual(m.keys(), m2.keys())
358
358
359 def testMatchesDirectory(self):
359 def testMatchesDirectory(self):
360 '''Tests matches() on a relpath match on a directory, which should
360 '''Tests matches() on a relpath match on a directory, which should
361 match against all files within said directory.'''
361 match against all files within said directory.'''
362 m = self.parsemanifest(A_DEEPER_MANIFEST)
362 m = self.parsemanifest(A_DEEPER_MANIFEST)
363
363
364 match = matchmod.match(
364 match = matchmod.match(
365 util.localpath(b'/repo'), b'', [b'a/b'], default=b'relpath'
365 util.localpath(b'/repo'), b'', [b'a/b'], default=b'relpath'
366 )
366 )
367 m2 = m.matches(match)
367 m2 = m._matches(match)
368
368
369 self.assertEqual(
369 self.assertEqual(
370 [
370 [
371 b'a/b/c/bar.py',
371 b'a/b/c/bar.py',
372 b'a/b/c/bar.txt',
372 b'a/b/c/bar.txt',
373 b'a/b/c/foo.py',
373 b'a/b/c/foo.py',
374 b'a/b/c/foo.txt',
374 b'a/b/c/foo.txt',
375 b'a/b/d/baz.py',
375 b'a/b/d/baz.py',
376 b'a/b/d/qux.py',
376 b'a/b/d/qux.py',
377 b'a/b/d/ten.txt',
377 b'a/b/d/ten.txt',
378 b'a/b/dog.py',
378 b'a/b/dog.py',
379 b'a/b/fish.py',
379 b'a/b/fish.py',
380 ],
380 ],
381 m2.keys(),
381 m2.keys(),
382 )
382 )
383
383
384 def testMatchesExactPath(self):
384 def testMatchesExactPath(self):
385 '''Tests matches() on an exact match on a directory, which should
385 '''Tests matches() on an exact match on a directory, which should
386 result in an empty manifest because you can't perform an exact match
386 result in an empty manifest because you can't perform an exact match
387 against a directory.'''
387 against a directory.'''
388 m = self.parsemanifest(A_DEEPER_MANIFEST)
388 m = self.parsemanifest(A_DEEPER_MANIFEST)
389
389
390 match = matchmod.exact([b'a/b'])
390 match = matchmod.exact([b'a/b'])
391 m2 = m.matches(match)
391 m2 = m._matches(match)
392
392
393 self.assertEqual([], m2.keys())
393 self.assertEqual([], m2.keys())
394
394
395 def testMatchesCwd(self):
395 def testMatchesCwd(self):
396 '''Tests matches() on a relpath match with the current directory ('.')
396 '''Tests matches() on a relpath match with the current directory ('.')
397 when not in the root directory.'''
397 when not in the root directory.'''
398 m = self.parsemanifest(A_DEEPER_MANIFEST)
398 m = self.parsemanifest(A_DEEPER_MANIFEST)
399
399
400 match = matchmod.match(
400 match = matchmod.match(
401 util.localpath(b'/repo'), b'a/b', [b'.'], default=b'relpath'
401 util.localpath(b'/repo'), b'a/b', [b'.'], default=b'relpath'
402 )
402 )
403 m2 = m.matches(match)
403 m2 = m._matches(match)
404
404
405 self.assertEqual(
405 self.assertEqual(
406 [
406 [
407 b'a/b/c/bar.py',
407 b'a/b/c/bar.py',
408 b'a/b/c/bar.txt',
408 b'a/b/c/bar.txt',
409 b'a/b/c/foo.py',
409 b'a/b/c/foo.py',
410 b'a/b/c/foo.txt',
410 b'a/b/c/foo.txt',
411 b'a/b/d/baz.py',
411 b'a/b/d/baz.py',
412 b'a/b/d/qux.py',
412 b'a/b/d/qux.py',
413 b'a/b/d/ten.txt',
413 b'a/b/d/ten.txt',
414 b'a/b/dog.py',
414 b'a/b/dog.py',
415 b'a/b/fish.py',
415 b'a/b/fish.py',
416 ],
416 ],
417 m2.keys(),
417 m2.keys(),
418 )
418 )
419
419
420 def testMatchesWithPattern(self):
420 def testMatchesWithPattern(self):
421 '''Tests matches() for files matching a pattern that reside
421 '''Tests matches() for files matching a pattern that reside
422 deeper than the specified directory.'''
422 deeper than the specified directory.'''
423 m = self.parsemanifest(A_DEEPER_MANIFEST)
423 m = self.parsemanifest(A_DEEPER_MANIFEST)
424
424
425 match = matchmod.match(util.localpath(b'/repo'), b'', [b'a/b/*/*.txt'])
425 match = matchmod.match(util.localpath(b'/repo'), b'', [b'a/b/*/*.txt'])
426 m2 = m.matches(match)
426 m2 = m._matches(match)
427
427
428 self.assertEqual(
428 self.assertEqual(
429 [b'a/b/c/bar.txt', b'a/b/c/foo.txt', b'a/b/d/ten.txt'], m2.keys()
429 [b'a/b/c/bar.txt', b'a/b/c/foo.txt', b'a/b/d/ten.txt'], m2.keys()
430 )
430 )
431
431
432
432
433 class testmanifestdict(unittest.TestCase, basemanifesttests):
433 class testmanifestdict(unittest.TestCase, basemanifesttests):
434 def parsemanifest(self, text):
434 def parsemanifest(self, text):
435 return manifestmod.manifestdict(text)
435 return manifestmod.manifestdict(text)
436
436
437 def testObviouslyBogusManifest(self):
437 def testObviouslyBogusManifest(self):
438 # This is a 163k manifest that came from oss-fuzz. It was a
438 # This is a 163k manifest that came from oss-fuzz. It was a
439 # timeout there, but when run normally it doesn't seem to
439 # timeout there, but when run normally it doesn't seem to
440 # present any particular slowness.
440 # present any particular slowness.
441 data = zlib.decompress(
441 data = zlib.decompress(
442 b'x\x9c\xed\xce;\n\x83\x00\x10\x04\xd0\x8deNa\x93~\xf1\x03\xc9q\xf4'
442 b'x\x9c\xed\xce;\n\x83\x00\x10\x04\xd0\x8deNa\x93~\xf1\x03\xc9q\xf4'
443 b'\x14\xeaU\xbdB\xda\xd4\xe6Cj\xc1FA\xde+\x86\xe9f\xa2\xfci\xbb\xfb'
443 b'\x14\xeaU\xbdB\xda\xd4\xe6Cj\xc1FA\xde+\x86\xe9f\xa2\xfci\xbb\xfb'
444 b'\xa3\xef\xea\xba\xca\x7fk\x86q\x9a\xc6\xc8\xcc&\xb3\xcf\xf8\xb8|#'
444 b'\xa3\xef\xea\xba\xca\x7fk\x86q\x9a\xc6\xc8\xcc&\xb3\xcf\xf8\xb8|#'
445 b'\x8a9\x00\xd8\xe6v\xf4\x01N\xe1\n\x00\x00\x00\x00\x00\x00\x00\x00'
445 b'\x8a9\x00\xd8\xe6v\xf4\x01N\xe1\n\x00\x00\x00\x00\x00\x00\x00\x00'
446 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
446 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
447 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
447 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
448 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
448 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
449 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
449 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
450 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
450 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
451 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
451 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
452 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
452 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
453 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
453 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
454 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
454 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
455 b'\x00\x00\xc0\x8aey\x1d}\x01\xd8\xe0\xb9\xf3\xde\x1b\xcf\x17'
455 b'\x00\x00\xc0\x8aey\x1d}\x01\xd8\xe0\xb9\xf3\xde\x1b\xcf\x17'
456 b'\xac\xbe'
456 b'\xac\xbe'
457 )
457 )
458 with self.assertRaises(ValueError):
458 with self.assertRaises(ValueError):
459 self.parsemanifest(data)
459 self.parsemanifest(data)
460
460
461
461
462 class testtreemanifest(unittest.TestCase, basemanifesttests):
462 class testtreemanifest(unittest.TestCase, basemanifesttests):
463 def parsemanifest(self, text):
463 def parsemanifest(self, text):
464 return manifestmod.treemanifest(b'', text)
464 return manifestmod.treemanifest(b'', text)
465
465
466 def testWalkSubtrees(self):
466 def testWalkSubtrees(self):
467 m = self.parsemanifest(A_DEEPER_MANIFEST)
467 m = self.parsemanifest(A_DEEPER_MANIFEST)
468
468
469 dirs = [s._dir for s in m.walksubtrees()]
469 dirs = [s._dir for s in m.walksubtrees()]
470 self.assertEqual(
470 self.assertEqual(
471 sorted(
471 sorted(
472 [b'', b'a/', b'a/c/', b'a/d/', b'a/b/', b'a/b/c/', b'a/b/d/']
472 [b'', b'a/', b'a/c/', b'a/d/', b'a/b/', b'a/b/c/', b'a/b/d/']
473 ),
473 ),
474 sorted(dirs),
474 sorted(dirs),
475 )
475 )
476
476
477 match = matchmod.match(util.localpath(b'/repo'), b'', [b'path:a/b/'])
477 match = matchmod.match(util.localpath(b'/repo'), b'', [b'path:a/b/'])
478 dirs = [s._dir for s in m.walksubtrees(matcher=match)]
478 dirs = [s._dir for s in m.walksubtrees(matcher=match)]
479 self.assertEqual(sorted([b'a/b/', b'a/b/c/', b'a/b/d/']), sorted(dirs))
479 self.assertEqual(sorted([b'a/b/', b'a/b/c/', b'a/b/d/']), sorted(dirs))
480
480
481
481
482 if __name__ == '__main__':
482 if __name__ == '__main__':
483 silenttestrunner.main(__name__)
483 silenttestrunner.main(__name__)
General Comments 0
You need to be logged in to leave comments. Login now