error: introduce StorageError...
Gregory Szorc
r39812:cb65d4b7 default
@@ -1,329 +1,336 @@
# error.py - Mercurial exceptions
#
# Copyright 2005-2008 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Mercurial exceptions.

This allows us to catch exceptions at higher levels without forcing
imports.
"""

from __future__ import absolute_import

# Do not import anything but pycompat here, please
from . import pycompat

def _tobytes(exc):
    """Byte-stringify exception in the same way as BaseException_str()"""
    if not exc.args:
        return b''
    if len(exc.args) == 1:
        return pycompat.bytestr(exc.args[0])
    return b'(%s)' % b', '.join(b"'%s'" % pycompat.bytestr(a) for a in exc.args)

class Hint(object):
    """Mix-in to provide a hint of an error

    This should come first in the inheritance list to consume a hint and
    pass remaining arguments to the exception class.
    """
    def __init__(self, *args, **kw):
        self.hint = kw.pop(r'hint', None)
        super(Hint, self).__init__(*args, **kw)

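A minimal sketch (not part of this changeset) of how the Hint mix-in is consumed: because Hint pops the ``hint`` keyword, any exception that lists it first in its bases, such as Abort, accepts hint= at raise time. The helper name and messages below are hypothetical.

from mercurial import error

def checkprecondition(ok):
    # Hint.__init__ consumes the ``hint`` keyword; the remaining arguments
    # are passed on to the underlying exception class.
    if not ok:
        raise error.Abort(b'precondition failed',
                          hint=b'illustrative hint text shown to the user')
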
-class RevlogError(Hint, Exception):
+class StorageError(Hint, Exception):
+    """Raised when an error occurs in a storage layer.
+
+    Usually subclassed by a storage-specific exception.
+    """
+    __bytes__ = _tobytes
+
+class RevlogError(StorageError):
    __bytes__ = _tobytes

class FilteredIndexError(IndexError):
    __bytes__ = _tobytes

class LookupError(RevlogError, KeyError):
    def __init__(self, name, index, message):
        self.name = name
        self.index = index
        # this can't be called 'message' because at least some installs of
        # Python 2.6+ complain about the 'message' property being deprecated
        self.lookupmessage = message
        if isinstance(name, bytes) and len(name) == 20:
            from .node import short
            name = short(name)
        RevlogError.__init__(self, '%s@%s: %s' % (index, name, message))

    def __bytes__(self):
        return RevlogError.__bytes__(self)

    def __str__(self):
        return RevlogError.__str__(self)

class AmbiguousPrefixLookupError(LookupError):
    pass

class FilteredLookupError(LookupError):
    pass

class ManifestLookupError(LookupError):
    pass

class CommandError(Exception):
    """Exception raised on errors in parsing the command line."""
    __bytes__ = _tobytes

class InterventionRequired(Hint, Exception):
    """Exception raised when a command requires human intervention."""
    __bytes__ = _tobytes

class Abort(Hint, Exception):
    """Raised if a command needs to print an error and exit."""
    __bytes__ = _tobytes

class HookLoadError(Abort):
    """raised when loading a hook fails, aborting an operation

    Exists to allow more specialized catching."""

class HookAbort(Abort):
    """raised when a validation hook fails, aborting an operation

    Exists to allow more specialized catching."""

class ConfigError(Abort):
    """Exception raised when parsing config files"""

class UpdateAbort(Abort):
    """Raised when an update is aborted for destination issue"""

class MergeDestAbort(Abort):
    """Raised when an update is aborted for destination issues"""

class NoMergeDestAbort(MergeDestAbort):
    """Raised when an update is aborted because there is nothing to merge"""

class ManyMergeDestAbort(MergeDestAbort):
    """Raised when an update is aborted because destination is ambiguous"""

class ResponseExpected(Abort):
    """Raised when an EOF is received for a prompt"""
    def __init__(self):
        from .i18n import _
        Abort.__init__(self, _('response expected'))

class OutOfBandError(Hint, Exception):
    """Exception raised when a remote repo reports failure"""
    __bytes__ = _tobytes

class ParseError(Hint, Exception):
    """Raised when parsing config files and {rev,file}sets (msg[, pos])"""
    __bytes__ = _tobytes

class PatchError(Exception):
    __bytes__ = _tobytes

class UnknownIdentifier(ParseError):
    """Exception raised when a {rev,file}set references an unknown identifier"""

    def __init__(self, function, symbols):
        from .i18n import _
        ParseError.__init__(self, _("unknown identifier: %s") % function)
        self.function = function
        self.symbols = symbols

class RepoError(Hint, Exception):
    __bytes__ = _tobytes

class RepoLookupError(RepoError):
    pass

class FilteredRepoLookupError(RepoLookupError):
    pass

class CapabilityError(RepoError):
    pass

class RequirementError(RepoError):
    """Exception raised if .hg/requires has an unknown entry."""

class StdioError(IOError):
    """Raised if I/O to stdout or stderr fails"""

    def __init__(self, err):
        IOError.__init__(self, err.errno, err.strerror)

    # no __bytes__() because error message is derived from the standard IOError

class UnsupportedMergeRecords(Abort):
    def __init__(self, recordtypes):
        from .i18n import _
        self.recordtypes = sorted(recordtypes)
        s = ' '.join(self.recordtypes)
        Abort.__init__(
            self, _('unsupported merge state records: %s') % s,
            hint=_('see https://mercurial-scm.org/wiki/MergeStateRecords for '
                   'more information'))

class UnknownVersion(Abort):
    """generic exception for aborting from an encounter with an unknown version
    """

    def __init__(self, msg, hint=None, version=None):
        self.version = version
        super(UnknownVersion, self).__init__(msg, hint=hint)

class LockError(IOError):
    def __init__(self, errno, strerror, filename, desc):
        IOError.__init__(self, errno, strerror, filename)
        self.desc = desc

    # no __bytes__() because error message is derived from the standard IOError

class LockHeld(LockError):
    def __init__(self, errno, filename, desc, locker):
        LockError.__init__(self, errno, 'Lock held', filename, desc)
        self.locker = locker

class LockUnavailable(LockError):
    pass

# LockError is for errors while acquiring the lock -- this is unrelated
class LockInheritanceContractViolation(RuntimeError):
    __bytes__ = _tobytes

class ResponseError(Exception):
    """Raised to print an error with part of output and exit."""
    __bytes__ = _tobytes

class UnknownCommand(Exception):
    """Exception raised if command is not in the command table."""
    __bytes__ = _tobytes

class AmbiguousCommand(Exception):
    """Exception raised if command shortcut matches more than one command."""
    __bytes__ = _tobytes

# derived from KeyboardInterrupt to simplify some breakout code
class SignalInterrupt(KeyboardInterrupt):
    """Exception raised on SIGTERM and SIGHUP."""

class SignatureError(Exception):
    __bytes__ = _tobytes

class PushRaced(RuntimeError):
    """An exception raised during unbundling that indicate a push race"""
    __bytes__ = _tobytes

class ProgrammingError(Hint, RuntimeError):
    """Raised if a mercurial (core or extension) developer made a mistake"""

    def __init__(self, msg, *args, **kwargs):
        # On Python 3, turn the message back into a string since this is
        # an internal-only error that won't be printed except in a
        # stack traces.
        msg = pycompat.sysstr(msg)
        super(ProgrammingError, self).__init__(msg, *args, **kwargs)

    __bytes__ = _tobytes

class WdirUnsupported(Exception):
    """An exception which is raised when 'wdir()' is not supported"""
    __bytes__ = _tobytes

# bundle2 related errors
class BundleValueError(ValueError):
    """error raised when bundle2 cannot be processed"""
    __bytes__ = _tobytes

class BundleUnknownFeatureError(BundleValueError):
    def __init__(self, parttype=None, params=(), values=()):
        self.parttype = parttype
        self.params = params
        self.values = values
        if self.parttype is None:
            msg = 'Stream Parameter'
        else:
            msg = parttype
        entries = self.params
        if self.params and self.values:
            assert len(self.params) == len(self.values)
            entries = []
            for idx, par in enumerate(self.params):
                val = self.values[idx]
                if val is None:
                    entries.append(val)
                else:
                    entries.append("%s=%r" % (par, pycompat.maybebytestr(val)))
        if entries:
            msg = '%s - %s' % (msg, ', '.join(entries))
        ValueError.__init__(self, msg)

class ReadOnlyPartError(RuntimeError):
    """error raised when code tries to alter a part being generated"""
    __bytes__ = _tobytes

class PushkeyFailed(Abort):
    """error raised when a pushkey part failed to update a value"""

    def __init__(self, partid, namespace=None, key=None, new=None, old=None,
                 ret=None):
        self.partid = partid
        self.namespace = namespace
        self.key = key
        self.new = new
        self.old = old
        self.ret = ret
        # no i18n expected to be processed into a better message
        Abort.__init__(self, 'failed to update value for "%s/%s"'
                       % (namespace, key))

class CensoredNodeError(RevlogError):
    """error raised when content verification fails on a censored node

    Also contains the tombstone data substituted for the uncensored data.
    """

    def __init__(self, filename, node, tombstone):
        from .node import short
        RevlogError.__init__(self, '%s:%s' % (filename, short(node)))
        self.tombstone = tombstone

class CensoredBaseError(RevlogError):
    """error raised when a delta is rejected because its base is censored

    A delta based on a censored revision must be formed as single patch
    operation which replaces the entire base with new content. This ensures
    the delta may be applied by clones which have not censored the base.
    """

class InvalidBundleSpecification(Exception):
    """error raised when a bundle specification is invalid.

    This is used for syntax errors as opposed to support errors.
    """
    __bytes__ = _tobytes

class UnsupportedBundleSpecification(Exception):
    """error raised when a bundle specification is not supported."""
    __bytes__ = _tobytes

class CorruptedState(Exception):
    """error raised when a command is not able to read its state from file"""
    __bytes__ = _tobytes

class PeerTransportError(Abort):
    """Transport-level I/O error when communicating with a peer repo."""

class InMemoryMergeConflictsError(Exception):
    """Exception raised when merge conflicts arose during an in-memory merge."""
    __bytes__ = _tobytes

class WireprotoCommandError(Exception):
    """Represents an error during execution of a wire protocol command.

    Should only be thrown by wire protocol version 2 commands.

    The error is a formatter string and an optional iterable of arguments.
    """
    def __init__(self, message, args=None):
        self.message = message
        self.messageargs = args
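
With StorageError in place, callers can catch storage-layer failures without knowing which backend raised them; RevlogError, LookupError, CensoredNodeError and friends all derive from it. A hedged sketch of that pattern follows; ``readfiledata`` and ``filestore`` are hypothetical names, not part of this changeset.

from mercurial import error

def readfiledata(filestore, node):
    # ``filestore`` is assumed to implement the ifiledata interface defined
    # in repository.py below; read() resolves a revision's fulltext.
    try:
        return filestore.read(node)
    except error.StorageError:
        # One handler now covers revlog-backed and alternate storage alike.
        raise error.Abort(b'stored data for the requested node is unusable')
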
@@ -1,1585 +1,1585 @@
# repository.py - Interfaces and base classes for repositories and peers.
#
# Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from .i18n import _
from . import (
    error,
)
from .utils import (
    interfaceutil,
)

# When narrowing is finalized and no longer subject to format changes,
# we should move this to just "narrow" or similar.
NARROW_REQUIREMENT = 'narrowhg-experimental'

class ipeerconnection(interfaceutil.Interface):
    """Represents a "connection" to a repository.

    This is the base interface for representing a connection to a repository.
    It holds basic properties and methods applicable to all peer types.

    This is not a complete interface definition and should not be used
    outside of this module.
    """
    ui = interfaceutil.Attribute("""ui.ui instance""")

    def url():
        """Returns a URL string representing this peer.

        Currently, implementations expose the raw URL used to construct the
        instance. It may contain credentials as part of the URL. The
        expectations of the value aren't well-defined and this could lead to
        data leakage.

        TODO audit/clean consumers and more clearly define the contents of this
        value.
        """

    def local():
        """Returns a local repository instance.

        If the peer represents a local repository, returns an object that
        can be used to interface with it. Otherwise returns ``None``.
        """

    def peer():
        """Returns an object conforming to this interface.

        Most implementations will ``return self``.
        """

    def canpush():
        """Returns a boolean indicating if this peer can be pushed to."""

    def close():
        """Close the connection to this peer.

        This is called when the peer will no longer be used. Resources
        associated with the peer should be cleaned up.
        """

class ipeercapabilities(interfaceutil.Interface):
    """Peer sub-interface related to capabilities."""

    def capable(name):
        """Determine support for a named capability.

        Returns ``False`` if capability not supported.

        Returns ``True`` if boolean capability is supported. Returns a string
        if capability support is non-boolean.

        Capability strings may or may not map to wire protocol capabilities.
        """

    def requirecap(name, purpose):
        """Require a capability to be present.

        Raises a ``CapabilityError`` if the capability isn't present.
        """

class ipeercommands(interfaceutil.Interface):
    """Client-side interface for communicating over the wire protocol.

    This interface is used as a gateway to the Mercurial wire protocol.
    methods commonly call wire protocol commands of the same name.
    """

    def branchmap():
        """Obtain heads in named branches.

        Returns a dict mapping branch name to an iterable of nodes that are
        heads on that branch.
        """

    def capabilities():
        """Obtain capabilities of the peer.

        Returns a set of string capabilities.
        """

    def clonebundles():
        """Obtains the clone bundles manifest for the repo.

        Returns the manifest as unparsed bytes.
        """

    def debugwireargs(one, two, three=None, four=None, five=None):
        """Used to facilitate debugging of arguments passed over the wire."""

    def getbundle(source, **kwargs):
        """Obtain remote repository data as a bundle.

        This command is how the bulk of repository data is transferred from
        the peer to the local repository

        Returns a generator of bundle data.
        """

    def heads():
        """Determine all known head revisions in the peer.

        Returns an iterable of binary nodes.
        """

    def known(nodes):
        """Determine whether multiple nodes are known.

        Accepts an iterable of nodes whose presence to check for.

        Returns an iterable of booleans indicating of the corresponding node
        at that index is known to the peer.
        """

    def listkeys(namespace):
        """Obtain all keys in a pushkey namespace.

        Returns an iterable of key names.
        """

    def lookup(key):
        """Resolve a value to a known revision.

        Returns a binary node of the resolved revision on success.
        """

    def pushkey(namespace, key, old, new):
        """Set a value using the ``pushkey`` protocol.

        Arguments correspond to the pushkey namespace and key to operate on and
        the old and new values for that key.

        Returns a string with the peer result. The value inside varies by the
        namespace.
        """

    def stream_out():
        """Obtain streaming clone data.

        Successful result should be a generator of data chunks.
        """

    def unbundle(bundle, heads, url):
        """Transfer repository data to the peer.

        This is how the bulk of data during a push is transferred.

        Returns the integer number of heads added to the peer.
        """

class ipeerlegacycommands(interfaceutil.Interface):
    """Interface for implementing support for legacy wire protocol commands.

    Wire protocol commands transition to legacy status when they are no longer
    used by modern clients. To facilitate identifying which commands are
    legacy, the interfaces are split.
    """

    def between(pairs):
        """Obtain nodes between pairs of nodes.

        ``pairs`` is an iterable of node pairs.

        Returns an iterable of iterables of nodes corresponding to each
        requested pair.
        """

    def branches(nodes):
        """Obtain ancestor changesets of specific nodes back to a branch point.

        For each requested node, the peer finds the first ancestor node that is
        a DAG root or is a merge.

        Returns an iterable of iterables with the resolved values for each node.
        """

    def changegroup(nodes, source):
        """Obtain a changegroup with data for descendants of specified nodes."""

    def changegroupsubset(bases, heads, source):
        pass

class ipeercommandexecutor(interfaceutil.Interface):
    """Represents a mechanism to execute remote commands.

    This is the primary interface for requesting that wire protocol commands
    be executed. Instances of this interface are active in a context manager
    and have a well-defined lifetime. When the context manager exits, all
    outstanding requests are waited on.
    """

    def callcommand(name, args):
        """Request that a named command be executed.

        Receives the command name and a dictionary of command arguments.

        Returns a ``concurrent.futures.Future`` that will resolve to the
        result of that command request. That exact value is left up to
        the implementation and possibly varies by command.

        Not all commands can coexist with other commands in an executor
        instance: it depends on the underlying wire protocol transport being
        used and the command itself.

        Implementations MAY call ``sendcommands()`` automatically if the
        requested command can not coexist with other commands in this executor.

        Implementations MAY call ``sendcommands()`` automatically when the
        future's ``result()`` is called. So, consumers using multiple
        commands with an executor MUST ensure that ``result()`` is not called
        until all command requests have been issued.
        """

    def sendcommands():
        """Trigger submission of queued command requests.

        Not all transports submit commands as soon as they are requested to
        run. When called, this method forces queued command requests to be
        issued. It will no-op if all commands have already been sent.

        When called, no more new commands may be issued with this executor.
        """

    def close():
        """Signal that this command request is finished.

        When called, no more new commands may be issued. All outstanding
        commands that have previously been issued are waited on before
        returning. This not only includes waiting for the futures to resolve,
        but also waiting for all response data to arrive. In other words,
        calling this waits for all on-wire state for issued command requests
        to finish.

        When used as a context manager, this method is called when exiting the
        context manager.

        This method may call ``sendcommands()`` if there are buffered commands.
        """

class ipeerrequests(interfaceutil.Interface):
    """Interface for executing commands on a peer."""

    def commandexecutor():
        """A context manager that resolves to an ipeercommandexecutor.

        The object this resolves to can be used to issue command requests
        to the peer.

        Callers should call its ``callcommand`` method to issue command
        requests.

        A new executor should be obtained for each distinct set of commands
        (possibly just a single command) that the consumer wants to execute
        as part of a single operation or round trip. This is because some
        peers are half-duplex and/or don't support persistent connections.
        e.g. in the case of HTTP peers, commands sent to an executor represent
        a single HTTP request. While some peers may support multiple command
        sends over the wire per executor, consumers need to code to the least
        capable peer. So it should be assumed that command executors buffer
        called commands until they are told to send them and that each
        command executor could result in a new connection or wire-level request
        being issued.
        """

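A usage sketch inferred from the docstrings above (not part of this changeset): the executor is used as a context manager, every command is issued before any future's result() is consulted, and close() on exit waits for the outstanding requests. The ``peer`` variable and the command names are illustrative.

def fetchheadsandbranchmap(peer):
    with peer.commandexecutor() as executor:
        # Issue all commands before touching result(); result() may force
        # buffered commands to be sent on some transports.
        fheads = executor.callcommand(b'heads', {})
        fbranchmap = executor.callcommand(b'branchmap', {})

    # Exiting the context manager waits for the responses to arrive.
    return fheads.result(), fbranchmap.result()
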
class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
    """Unified interface for peer repositories.

    All peer instances must conform to this interface.
    """

@interfaceutil.implementer(ipeerbase)
class peer(object):
    """Base class for peer repositories."""

    def capable(self, name):
        caps = self.capabilities()
        if name in caps:
            return True

        name = '%s=' % name
        for cap in caps:
            if cap.startswith(name):
                return cap[len(name):]

        return False

    def requirecap(self, name, purpose):
        if self.capable(name):
            return

        raise error.CapabilityError(
            _('cannot %s; remote repository does not support the %r '
              'capability') % (purpose, name))

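The capable() implementation above returns True for boolean capabilities, the text after "name=" for valued capabilities, and False otherwise. Below is a small sketch of consuming those three cases; the capability name is a placeholder, not a real wire capability.

def describecapability(peer, name=b'examplecap'):
    cap = peer.capable(name)
    if cap is False:
        return b'not supported'
    if cap is True:
        return b'supported (boolean capability)'
    # Valued capabilities come back as the string following "name=".
    return b'supported with value: %s' % cap
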
class irevisiondelta(interfaceutil.Interface):
    """Represents a delta between one revision and another.

    Instances convey enough information to allow a revision to be exchanged
    with another repository.

    Instances represent the fulltext revision data or a delta against
    another revision. Therefore the ``revision`` and ``delta`` attributes
    are mutually exclusive.

    Typically used for changegroup generation.
    """

    node = interfaceutil.Attribute(
        """20 byte node of this revision.""")

    p1node = interfaceutil.Attribute(
        """20 byte node of 1st parent of this revision.""")

    p2node = interfaceutil.Attribute(
        """20 byte node of 2nd parent of this revision.""")

    linknode = interfaceutil.Attribute(
        """20 byte node of the changelog revision this node is linked to.""")

    flags = interfaceutil.Attribute(
        """2 bytes of integer flags that apply to this revision.""")

    basenode = interfaceutil.Attribute(
        """20 byte node of the revision this data is a delta against.

        ``nullid`` indicates that the revision is a full revision and not
        a delta.
        """)

    baserevisionsize = interfaceutil.Attribute(
        """Size of base revision this delta is against.

        May be ``None`` if ``basenode`` is ``nullid``.
        """)

    revision = interfaceutil.Attribute(
        """Raw fulltext of revision data for this node.""")

    delta = interfaceutil.Attribute(
        """Delta between ``basenode`` and ``node``.

        Stored in the bdiff delta format.
        """)

class irevisiondeltarequest(interfaceutil.Interface):
    """Represents a request to generate an ``irevisiondelta``."""

    node = interfaceutil.Attribute(
        """20 byte node of revision being requested.""")

    p1node = interfaceutil.Attribute(
        """20 byte node of 1st parent of revision.""")

    p2node = interfaceutil.Attribute(
        """20 byte node of 2nd parent of revision.""")

    linknode = interfaceutil.Attribute(
        """20 byte node to store in ``linknode`` attribute.""")

    basenode = interfaceutil.Attribute(
        """Base revision that delta should be generated against.

        If ``nullid``, the derived ``irevisiondelta`` should have its
        ``revision`` field populated and no delta should be generated.

        If ``None``, the delta may be generated against any revision that
        is an ancestor of this revision. Or a full revision may be used.

        If any other value, the delta should be produced against that
        revision.
        """)

    ellipsis = interfaceutil.Attribute(
        """Boolean on whether the ellipsis flag should be set.""")

class ifilerevisionssequence(interfaceutil.Interface):
    """Contains index data for all revisions of a file.

    Types implementing this behave like lists of tuples. The index
    in the list corresponds to the revision number. The values contain
    index metadata.

    The *null* revision (revision number -1) is always the last item
    in the index.
    """

    def __len__():
        """The total number of revisions."""

    def __getitem__(rev):
        """Returns the object having a specific revision number.

        Returns an 8-tuple with the following fields:

        offset+flags
            Contains the offset and flags for the revision. 64-bit unsigned
            integer where first 6 bytes are the offset and the next 2 bytes
            are flags. The offset can be 0 if it is not used by the store.
        compressed size
            Size of the revision data in the store. It can be 0 if it isn't
            needed by the store.
        uncompressed size
            Fulltext size. It can be 0 if it isn't needed by the store.
        base revision
            Revision number of revision the delta for storage is encoded
            against. -1 indicates not encoded against a base revision.
        link revision
            Revision number of changelog revision this entry is related to.
        p1 revision
            Revision number of 1st parent. -1 if no 1st parent.
        p2 revision
            Revision number of 2nd parent. -1 if no 1st parent.
        node
            Binary node value for this revision number.

        Negative values should index off the end of the sequence. ``-1``
        should return the null revision. ``-2`` should return the most
        recent revision.
        """

    def __contains__(rev):
        """Whether a revision number exists."""

    def insert(self, i, entry):
        """Add an item to the index at specific revision."""

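A sketch of consuming the 8-tuple documented in __getitem__ above. The ``fileindex`` argument is a hypothetical object whose ``index`` attribute implements ifilerevisionssequence; skipping the trailing entry via len() - 1 follows the "null revision is always the last item" note and is an assumption about the concrete store.

import binascii

def dumpindex(fileindex):
    for rev in range(len(fileindex.index) - 1):
        (offsetflags, compressedsize, fulltextsize, baserev,
         linkrev, p1rev, p2rev, node) = fileindex.index[rev]
        offset = offsetflags >> 16      # upper 6 bytes: offset in the store
        flags = offsetflags & 0xffff    # lower 2 bytes: storage flags
        print(rev, offset, flags, compressedsize, fulltextsize,
              baserev, linkrev, p1rev, p2rev, binascii.hexlify(node))
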
453 class ifileindex(interfaceutil.Interface):
453 class ifileindex(interfaceutil.Interface):
454 """Storage interface for index data of a single file.
454 """Storage interface for index data of a single file.
455
455
456 File storage data is divided into index metadata and data storage.
456 File storage data is divided into index metadata and data storage.
457 This interface defines the index portion of the interface.
457 This interface defines the index portion of the interface.
458
458
459 The index logically consists of:
459 The index logically consists of:
460
460
461 * A mapping between revision numbers and nodes.
461 * A mapping between revision numbers and nodes.
462 * DAG data (storing and querying the relationship between nodes).
462 * DAG data (storing and querying the relationship between nodes).
463 * Metadata to facilitate storage.
463 * Metadata to facilitate storage.
464 """
464 """
465 index = interfaceutil.Attribute(
465 index = interfaceutil.Attribute(
466 """An ``ifilerevisionssequence`` instance.""")
466 """An ``ifilerevisionssequence`` instance.""")
467
467
468 def __len__():
468 def __len__():
469 """Obtain the number of revisions stored for this file."""
469 """Obtain the number of revisions stored for this file."""
470
470
471 def __iter__():
471 def __iter__():
472 """Iterate over revision numbers for this file."""
472 """Iterate over revision numbers for this file."""
473
473
474 def revs(start=0, stop=None):
474 def revs(start=0, stop=None):
475 """Iterate over revision numbers for this file, with control."""
475 """Iterate over revision numbers for this file, with control."""
476
476
477 def parents(node):
477 def parents(node):
478 """Returns a 2-tuple of parent nodes for a revision.
478 """Returns a 2-tuple of parent nodes for a revision.
479
479
480 Values will be ``nullid`` if the parent is empty.
480 Values will be ``nullid`` if the parent is empty.
481 """
481 """
482
482
483 def parentrevs(rev):
483 def parentrevs(rev):
484 """Like parents() but operates on revision numbers."""
484 """Like parents() but operates on revision numbers."""
485
485
486 def rev(node):
486 def rev(node):
487 """Obtain the revision number given a node.
487 """Obtain the revision number given a node.
488
488
489 Raises ``error.LookupError`` if the node is not known.
489 Raises ``error.LookupError`` if the node is not known.
490 """
490 """
491
491
492 def node(rev):
492 def node(rev):
493 """Obtain the node value given a revision number.
493 """Obtain the node value given a revision number.
494
494
495 Raises ``IndexError`` if the node is not known.
495 Raises ``IndexError`` if the node is not known.
496 """
496 """
497
497
498 def lookup(node):
498 def lookup(node):
499 """Attempt to resolve a value to a node.
499 """Attempt to resolve a value to a node.
500
500
501 Value can be a binary node, hex node, revision number, or a string
501 Value can be a binary node, hex node, revision number, or a string
502 that can be converted to an integer.
502 that can be converted to an integer.
503
503
504 Raises ``error.LookupError`` if a node could not be resolved.
504 Raises ``error.LookupError`` if a node could not be resolved.
505 """
505 """
506
506
507 def linkrev(rev):
507 def linkrev(rev):
508 """Obtain the changeset revision number a revision is linked to."""
508 """Obtain the changeset revision number a revision is linked to."""
509
509
510 def flags(rev):
510 def flags(rev):
511 """Obtain flags used to affect storage of a revision."""
511 """Obtain flags used to affect storage of a revision."""
512
512
513 def iscensored(rev):
513 def iscensored(rev):
514 """Return whether a revision's content has been censored."""
514 """Return whether a revision's content has been censored."""
515
515
516 def commonancestorsheads(node1, node2):
516 def commonancestorsheads(node1, node2):
517 """Obtain an iterable of nodes containing heads of common ancestors.
517 """Obtain an iterable of nodes containing heads of common ancestors.
518
518
519 See ``ancestor.commonancestorsheads()``.
519 See ``ancestor.commonancestorsheads()``.
520 """
520 """
521
521
522 def descendants(revs):
522 def descendants(revs):
523 """Obtain descendant revision numbers for a set of revision numbers.
523 """Obtain descendant revision numbers for a set of revision numbers.
524
524
525 If ``nullrev`` is in the set, this is equivalent to ``revs()``.
525 If ``nullrev`` is in the set, this is equivalent to ``revs()``.
526 """
526 """
527
527
528 def headrevs():
528 def headrevs():
529 """Obtain a list of revision numbers that are DAG heads.
529 """Obtain a list of revision numbers that are DAG heads.
530
530
531 The list is sorted oldest to newest.
531 The list is sorted oldest to newest.
532
532
533 TODO determine if sorting is required.
533 TODO determine if sorting is required.
534 """
534 """
535
535
536 def heads(start=None, stop=None):
536 def heads(start=None, stop=None):
537 """Obtain a list of nodes that are DAG heads, with control.
537 """Obtain a list of nodes that are DAG heads, with control.
538
538
539 The set of revisions examined can be limited by specifying
539 The set of revisions examined can be limited by specifying
540 ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
540 ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
541 iterable of nodes. DAG traversal starts at earlier revision
541 iterable of nodes. DAG traversal starts at earlier revision
542 ``start`` and iterates forward until any node in ``stop`` is
542 ``start`` and iterates forward until any node in ``stop`` is
543 encountered.
543 encountered.
544 """
544 """
545
545
546 def children(node):
546 def children(node):
547 """Obtain nodes that are children of a node.
547 """Obtain nodes that are children of a node.
548
548
549 Returns a list of nodes.
549 Returns a list of nodes.
550 """
550 """
551
551
552 def deltaparent(rev):
552 def deltaparent(rev):
553 """"Return the revision that is a suitable parent to delta against."""
553 """"Return the revision that is a suitable parent to delta against."""
554
554
555 class ifiledata(interfaceutil.Interface):
555 class ifiledata(interfaceutil.Interface):
556 """Storage interface for data storage of a specific file.
556 """Storage interface for data storage of a specific file.
557
557
558 This complements ``ifileindex`` and provides an interface for accessing
558 This complements ``ifileindex`` and provides an interface for accessing
559 data for a tracked file.
559 data for a tracked file.
560 """
560 """
561 def rawsize(rev):
561 def rawsize(rev):
562 """The size of the fulltext data for a revision as stored."""
562 """The size of the fulltext data for a revision as stored."""
563
563
564 def size(rev):
564 def size(rev):
565 """Obtain the fulltext size of file data.
565 """Obtain the fulltext size of file data.
566
566
567 Any metadata is excluded from size measurements. Use ``rawsize()`` if
567 Any metadata is excluded from size measurements. Use ``rawsize()`` if
568 metadata size is important.
568 metadata size is important.
569 """
569 """
570
570
571 def checkhash(fulltext, node, p1=None, p2=None, rev=None):
571 def checkhash(fulltext, node, p1=None, p2=None, rev=None):
572 """Validate the stored hash of a given fulltext and node.
572 """Validate the stored hash of a given fulltext and node.
573
573
574 Raises ``error.RevlogError`` if hash validation fails.
574 Raises ``error.StorageError`` if hash validation fails.
575 """
575 """
576
576
577 def revision(node, raw=False):
577 def revision(node, raw=False):
578 """"Obtain fulltext data for a node.
578 """"Obtain fulltext data for a node.
579
579
580 By default, any storage transformations are applied before the data
580 By default, any storage transformations are applied before the data
581 is returned. If ``raw`` is True, non-raw storage transformations
581 is returned. If ``raw`` is True, non-raw storage transformations
582 are not applied.
582 are not applied.
583
583
584 The fulltext data may contain a header containing metadata. Most
584 The fulltext data may contain a header containing metadata. Most
585 consumers should use ``read()`` to obtain the actual file data.
585 consumers should use ``read()`` to obtain the actual file data.
586 """
586 """
587
587
588 def read(node):
588 def read(node):
589 """Resolve file fulltext data.
589 """Resolve file fulltext data.
590
590
591 This is similar to ``revision()`` except any metadata in the data
591 This is similar to ``revision()`` except any metadata in the data
592 headers is stripped.
592 headers is stripped.
593 """
593 """
594
594
595 def renamed(node):
595 def renamed(node):
596 """Obtain copy metadata for a node.
596 """Obtain copy metadata for a node.
597
597
598 Returns ``False`` if no copy metadata is stored or a 2-tuple of
598 Returns ``False`` if no copy metadata is stored or a 2-tuple of
599 (path, node) from which this revision was copied.
599 (path, node) from which this revision was copied.
600 """
600 """
601
601
602 def cmp(node, fulltext):
602 def cmp(node, fulltext):
603 """Compare fulltext to another revision.
603 """Compare fulltext to another revision.
604
604
605 Returns True if the fulltext is different from what is stored.
605 Returns True if the fulltext is different from what is stored.
606
606
607 This takes copy metadata into account.
607 This takes copy metadata into account.
608
608
609 TODO better document the copy metadata and censoring logic.
609 TODO better document the copy metadata and censoring logic.
610 """
610 """
611
611
612 def revdiff(rev1, rev2):
612 def revdiff(rev1, rev2):
613 """Obtain a delta between two revision numbers.
613 """Obtain a delta between two revision numbers.
614
614
615 Operates on raw data in the store (``revision(node, raw=True)``).
615 Operates on raw data in the store (``revision(node, raw=True)``).
616
616
617 The returned data is the result of ``bdiff.bdiff`` on the raw
617 The returned data is the result of ``bdiff.bdiff`` on the raw
618 revision data.
618 revision data.
619 """
619 """
620
620
621 def emitrevisiondeltas(requests):
621 def emitrevisiondeltas(requests):
622 """Produce ``irevisiondelta`` from ``irevisiondeltarequest``s.
622 """Produce ``irevisiondelta`` from ``irevisiondeltarequest``s.
623
623
624 Given an iterable of objects conforming to the ``irevisiondeltarequest``
624 Given an iterable of objects conforming to the ``irevisiondeltarequest``
625 interface, emits objects conforming to the ``irevisiondelta``
625 interface, emits objects conforming to the ``irevisiondelta``
626 interface.
626 interface.
627
627
628 This method is a generator.
628 This method is a generator.
629
629
630 ``irevisiondelta`` objects should be emitted in the same order as the
630 ``irevisiondelta`` objects should be emitted in the same order as the
631 ``irevisiondeltarequest``s that were passed in.
631 ``irevisiondeltarequest``s that were passed in.
632
632
633 The emitted objects MUST conform to the results of
633 The emitted objects MUST conform to the results of
634 ``irevisiondeltarequest``. Namely, they must respect any requests
634 ``irevisiondeltarequest``. Namely, they must respect any requests
635 for building a delta from a specific ``basenode`` if defined.
635 for building a delta from a specific ``basenode`` if defined.
636
636
637 When sending deltas, implementations must take into account whether
637 When sending deltas, implementations must take into account whether
638 the client has the base delta before encoding a delta against that
638 the client has the base delta before encoding a delta against that
639 revision. A revision encountered previously in ``requests`` is
639 revision. A revision encountered previously in ``requests`` is
640 always a suitable base revision. An example of a bad delta is a delta
640 always a suitable base revision. An example of a bad delta is a delta
641 against a non-ancestor revision. Another example of a bad delta is a
641 against a non-ancestor revision. Another example of a bad delta is a
642 delta against a censored revision.
642 delta against a censored revision.
643 """
643 """
644
644
645 class ifilemutation(interfaceutil.Interface):
645 class ifilemutation(interfaceutil.Interface):
646 """Storage interface for mutation events of a tracked file."""
646 """Storage interface for mutation events of a tracked file."""
647
647
648 def add(filedata, meta, transaction, linkrev, p1, p2):
648 def add(filedata, meta, transaction, linkrev, p1, p2):
649 """Add a new revision to the store.
649 """Add a new revision to the store.
650
650
651 Takes file data, dictionary of metadata, a transaction, linkrev,
651 Takes file data, dictionary of metadata, a transaction, linkrev,
652 and parent nodes.
652 and parent nodes.
653
653
654 Returns the node that was added.
654 Returns the node that was added.
655
655
656 May no-op if a revision matching the supplied data is already stored.
656 May no-op if a revision matching the supplied data is already stored.
657 """
657 """
658
658
659 def addrevision(revisiondata, transaction, linkrev, p1, p2, node=None,
659 def addrevision(revisiondata, transaction, linkrev, p1, p2, node=None,
660 flags=0, cachedelta=None):
660 flags=0, cachedelta=None):
661 """Add a new revision to the store.
661 """Add a new revision to the store.
662
662
663 This is similar to ``add()`` except it operates at a lower level.
663 This is similar to ``add()`` except it operates at a lower level.
664
664
665 The data passed in already contains a metadata header, if any.
665 The data passed in already contains a metadata header, if any.
666
666
667 ``node`` and ``flags`` can be used to define the expected node and
667 ``node`` and ``flags`` can be used to define the expected node and
668 the flags to use with storage.
668 the flags to use with storage.
669
669
670 ``add()`` is usually called when adding files from e.g. the working
670 ``add()`` is usually called when adding files from e.g. the working
671 directory. ``addrevision()`` is often called by ``add()`` and for
671 directory. ``addrevision()`` is often called by ``add()`` and for
672 scenarios where revision data has already been computed, such as when
672 scenarios where revision data has already been computed, such as when
673 applying raw data from a peer repo.
673 applying raw data from a peer repo.
674 """
674 """
675
675
676 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
676 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
677 """Process a series of deltas for storage.
677 """Process a series of deltas for storage.
678
678
679 ``deltas`` is an iterable of 7-tuples of
679 ``deltas`` is an iterable of 7-tuples of
680 (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
680 (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
681 to add.
681 to add.
682
682
683 The ``delta`` field contains ``mpatch`` data to apply to a base
683 The ``delta`` field contains ``mpatch`` data to apply to a base
684 revision, identified by ``deltabase``. The base node can be
684 revision, identified by ``deltabase``. The base node can be
685 ``nullid``, in which case the header from the delta can be ignored
685 ``nullid``, in which case the header from the delta can be ignored
686 and the delta used as the fulltext.
686 and the delta used as the fulltext.
687
687
688 ``addrevisioncb`` should be called for each node as it is committed.
688 ``addrevisioncb`` should be called for each node as it is committed.
689
689
690 Returns a list of nodes that were processed. A node will be in the list
690 Returns a list of nodes that were processed. A node will be in the list
691 even if it existed in the store previously.
691 even if it existed in the store previously.
692 """
692 """
693
693
694 def getstrippoint(minlink):
694 def getstrippoint(minlink):
695 """Find the minimum revision that must be stripped to strip a linkrev.
695 """Find the minimum revision that must be stripped to strip a linkrev.
696
696
697 Returns a 2-tuple containing the minimum revision number and a set
697 Returns a 2-tuple containing the minimum revision number and a set
698 of all revision numbers that would be broken by this strip.
698 of all revision numbers that would be broken by this strip.
699
699
700 TODO this is highly revlog centric and should be abstracted into
700 TODO this is highly revlog centric and should be abstracted into
701 a higher-level deletion API. ``repair.strip()`` relies on this.
701 a higher-level deletion API. ``repair.strip()`` relies on this.
702 """
702 """
703
703
704 def strip(minlink, transaction):
704 def strip(minlink, transaction):
705 """Remove storage of items starting at a linkrev.
705 """Remove storage of items starting at a linkrev.
706
706
707 This uses ``getstrippoint()`` to determine the first node to remove.
707 This uses ``getstrippoint()`` to determine the first node to remove.
708 Then it effectively truncates storage for all revisions after that.
708 Then it effectively truncates storage for all revisions after that.
709
709
710 TODO this is highly revlog centric and should be abstracted into a
710 TODO this is highly revlog centric and should be abstracted into a
711 higher-level deletion API.
711 higher-level deletion API.
712 """
712 """
713
713
714 class ifilestorage(ifileindex, ifiledata, ifilemutation):
714 class ifilestorage(ifileindex, ifiledata, ifilemutation):
715 """Complete storage interface for a single tracked file."""
715 """Complete storage interface for a single tracked file."""
716
716
717 version = interfaceutil.Attribute(
717 version = interfaceutil.Attribute(
718 """Version number of storage.
718 """Version number of storage.
719
719
720 TODO this feels revlog centric and could likely be removed.
720 TODO this feels revlog centric and could likely be removed.
721 """)
721 """)
722
722
723 _generaldelta = interfaceutil.Attribute(
723 _generaldelta = interfaceutil.Attribute(
724 """Whether deltas can be against any parent revision.
724 """Whether deltas can be against any parent revision.
725
725
726 TODO this is used by changegroup code and it could probably be
726 TODO this is used by changegroup code and it could probably be
727 folded into another API.
727 folded into another API.
728 """)
728 """)
729
729
730 def files():
730 def files():
731 """Obtain paths that are backing storage for this file.
731 """Obtain paths that are backing storage for this file.
732
732
733 TODO this is used heavily by verify code and there should probably
733 TODO this is used heavily by verify code and there should probably
734 be a better API for that.
734 be a better API for that.
735 """
735 """
736
736
737 def checksize():
737 def checksize():
738 """Obtain the expected sizes of backing files.
738 """Obtain the expected sizes of backing files.
739
739
740 TODO this is used by verify and it should not be part of the interface.
740 TODO this is used by verify and it should not be part of the interface.
741 """
741 """
742
742
743 class idirs(interfaceutil.Interface):
743 class idirs(interfaceutil.Interface):
744 """Interface representing a collection of directories from paths.
744 """Interface representing a collection of directories from paths.
745
745
746 This interface is essentially a derived data structure representing
746 This interface is essentially a derived data structure representing
747 directories from a collection of paths.
747 directories from a collection of paths.
748 """
748 """
749
749
750 def addpath(path):
750 def addpath(path):
751 """Add a path to the collection.
751 """Add a path to the collection.
752
752
753 All directories in the path will be added to the collection.
753 All directories in the path will be added to the collection.
754 """
754 """
755
755
756 def delpath(path):
756 def delpath(path):
757 """Remove a path from the collection.
757 """Remove a path from the collection.
758
758
759 If the removal was the last path in a particular directory, the
759 If the removal was the last path in a particular directory, the
760 directory is removed from the collection.
760 directory is removed from the collection.
761 """
761 """
762
762
763 def __iter__():
763 def __iter__():
764 """Iterate over the directories in this collection of paths."""
764 """Iterate over the directories in this collection of paths."""
765
765
766 def __contains__(path):
766 def __contains__(path):
767 """Whether a specific directory is in this collection."""
767 """Whether a specific directory is in this collection."""
768
768
769 class imanifestdict(interfaceutil.Interface):
769 class imanifestdict(interfaceutil.Interface):
770 """Interface representing a manifest data structure.
770 """Interface representing a manifest data structure.
771
771
772 A manifest is effectively a dict mapping paths to entries. Each entry
772 A manifest is effectively a dict mapping paths to entries. Each entry
773 consists of a binary node and extra flags affecting that entry.
773 consists of a binary node and extra flags affecting that entry.
774 """
774 """
775
775
776 def __getitem__(path):
776 def __getitem__(path):
777 """Returns the binary node value for a path in the manifest.
777 """Returns the binary node value for a path in the manifest.
778
778
779 Raises ``KeyError`` if the path does not exist in the manifest.
779 Raises ``KeyError`` if the path does not exist in the manifest.
780
780
781 Equivalent to ``self.find(path)[0]``.
781 Equivalent to ``self.find(path)[0]``.
782 """
782 """
783
783
784 def find(path):
784 def find(path):
785 """Returns the entry for a path in the manifest.
785 """Returns the entry for a path in the manifest.
786
786
787 Returns a 2-tuple of (node, flags).
787 Returns a 2-tuple of (node, flags).
788
788
789 Raises ``KeyError`` if the path does not exist in the manifest.
789 Raises ``KeyError`` if the path does not exist in the manifest.
790 """
790 """
791
791
792 def __len__():
792 def __len__():
793 """Return the number of entries in the manifest."""
793 """Return the number of entries in the manifest."""
794
794
795 def __nonzero__():
795 def __nonzero__():
796 """Returns True if the manifest has entries, False otherwise."""
796 """Returns True if the manifest has entries, False otherwise."""
797
797
798 __bool__ = __nonzero__
798 __bool__ = __nonzero__
799
799
800 def __setitem__(path, node):
800 def __setitem__(path, node):
801 """Define the node value for a path in the manifest.
801 """Define the node value for a path in the manifest.
802
802
803 If the path is already in the manifest, its flags will be copied to
803 If the path is already in the manifest, its flags will be copied to
804 the new entry.
804 the new entry.
805 """
805 """
806
806
807 def __contains__(path):
807 def __contains__(path):
808 """Whether a path exists in the manifest."""
808 """Whether a path exists in the manifest."""
809
809
810 def __delitem__(path):
810 def __delitem__(path):
811 """Remove a path from the manifest.
811 """Remove a path from the manifest.
812
812
813 Raises ``KeyError`` if the path is not in the manifest.
813 Raises ``KeyError`` if the path is not in the manifest.
814 """
814 """
815
815
816 def __iter__():
816 def __iter__():
817 """Iterate over paths in the manifest."""
817 """Iterate over paths in the manifest."""
818
818
819 def iterkeys():
819 def iterkeys():
820 """Iterate over paths in the manifest."""
820 """Iterate over paths in the manifest."""
821
821
822 def keys():
822 def keys():
823 """Obtain a list of paths in the manifest."""
823 """Obtain a list of paths in the manifest."""
824
824
825 def filesnotin(other, match=None):
825 def filesnotin(other, match=None):
826 """Obtain the set of paths in this manifest but not in another.
826 """Obtain the set of paths in this manifest but not in another.
827
827
828 ``match`` is an optional matcher function to be applied to both
828 ``match`` is an optional matcher function to be applied to both
829 manifests.
829 manifests.
830
830
831 Returns a set of paths.
831 Returns a set of paths.
832 """
832 """
833
833
834 def dirs():
834 def dirs():
835 """Returns an object implementing the ``idirs`` interface."""
835 """Returns an object implementing the ``idirs`` interface."""
836
836
837 def hasdir(dir):
837 def hasdir(dir):
838 """Returns a bool indicating if a directory is in this manifest."""
838 """Returns a bool indicating if a directory is in this manifest."""
839
839
840 def matches(match):
840 def matches(match):
841 """Generate a new manifest filtered through a matcher.
841 """Generate a new manifest filtered through a matcher.
842
842
843 Returns an object conforming to the ``imanifestdict`` interface.
843 Returns an object conforming to the ``imanifestdict`` interface.
844 """
844 """
845
845
846 def walk(match):
846 def walk(match):
847 """Generator of paths in manifest satisfying a matcher.
847 """Generator of paths in manifest satisfying a matcher.
848
848
849 This is equivalent to ``self.matches(match).iterkeys()`` except a new
849 This is equivalent to ``self.matches(match).iterkeys()`` except a new
850 manifest object is not created.
850 manifest object is not created.
851
851
852 If the matcher has explicit files listed and they don't exist in
852 If the matcher has explicit files listed and they don't exist in
853 the manifest, ``match.bad()`` is called for each missing file.
853 the manifest, ``match.bad()`` is called for each missing file.
854 """
854 """
855
855
856 def diff(other, match=None, clean=False):
856 def diff(other, match=None, clean=False):
857 """Find differences between this manifest and another.
857 """Find differences between this manifest and another.
858
858
859 This manifest is compared to ``other``.
859 This manifest is compared to ``other``.
860
860
861 If ``match`` is provided, the two manifests are filtered against this
861 If ``match`` is provided, the two manifests are filtered against this
862 matcher and only entries satisfying the matcher are compared.
862 matcher and only entries satisfying the matcher are compared.
863
863
864 If ``clean`` is True, unchanged files are included in the returned
864 If ``clean`` is True, unchanged files are included in the returned
865 object.
865 object.
866
866
867 Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
867 Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
868 the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
868 the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
869 represents the node and flags for this manifest and ``(node2, flag2)``
869 represents the node and flags for this manifest and ``(node2, flag2)``
870 are the same for the other manifest.
870 are the same for the other manifest.
871 """
871 """
872
872
873 def setflag(path, flag):
873 def setflag(path, flag):
874 """Set the flag value for a given path.
874 """Set the flag value for a given path.
875
875
876 Raises ``KeyError`` if the path is not already in the manifest.
876 Raises ``KeyError`` if the path is not already in the manifest.
877 """
877 """
878
878
879 def get(path, default=None):
879 def get(path, default=None):
880 """Obtain the node value for a path or a default value if missing."""
880 """Obtain the node value for a path or a default value if missing."""
881
881
882 def flags(path, default=''):
882 def flags(path, default=''):
883 """Return the flags value for a path or a default value if missing."""
883 """Return the flags value for a path or a default value if missing."""
884
884
885 def copy():
885 def copy():
886 """Return a copy of this manifest."""
886 """Return a copy of this manifest."""
887
887
888 def items():
888 def items():
889 """Returns an iterable of (path, node) for items in this manifest."""
889 """Returns an iterable of (path, node) for items in this manifest."""
890
890
891 def iteritems():
891 def iteritems():
892 """Identical to items()."""
892 """Identical to items()."""
893
893
894 def iterentries():
894 def iterentries():
895 """Returns an iterable of (path, node, flags) for this manifest.
895 """Returns an iterable of (path, node, flags) for this manifest.
896
896
897 Similar to ``iteritems()`` except items are a 3-tuple and include
897 Similar to ``iteritems()`` except items are a 3-tuple and include
898 flags.
898 flags.
899 """
899 """
900
900
901 def text():
901 def text():
902 """Obtain the raw data representation for this manifest.
902 """Obtain the raw data representation for this manifest.
903
903
904 Result is used to create a manifest revision.
904 Result is used to create a manifest revision.
905 """
905 """
906
906
907 def fastdelta(base, changes):
907 def fastdelta(base, changes):
908 """Obtain a delta between this manifest and another given changes.
908 """Obtain a delta between this manifest and another given changes.
909
909
910 ``base`` is the raw data representation of another manifest.
910 ``base`` is the raw data representation of another manifest.
911
911
912 ``changes`` is an iterable of ``(path, to_delete)``.
912 ``changes`` is an iterable of ``(path, to_delete)``.
913
913
914 Returns a 2-tuple containing ``bytearray(self.text())`` and the
914 Returns a 2-tuple containing ``bytearray(self.text())`` and the
915 delta between ``base`` and this manifest.
915 delta between ``base`` and this manifest.
916 """
916 """
917
917
918 class imanifestrevisionbase(interfaceutil.Interface):
918 class imanifestrevisionbase(interfaceutil.Interface):
919 """Base interface representing a single revision of a manifest.
919 """Base interface representing a single revision of a manifest.
920
920
921 Should not be used as a primary interface: should always be inherited
921 Should not be used as a primary interface: should always be inherited
922 as part of a larger interface.
922 as part of a larger interface.
923 """
923 """
924
924
925 def new():
925 def new():
926 """Obtain a new manifest instance.
926 """Obtain a new manifest instance.
927
927
928 Returns an object conforming to the ``imanifestrevisionwritable``
928 Returns an object conforming to the ``imanifestrevisionwritable``
929 interface. The instance will be associated with the same
929 interface. The instance will be associated with the same
930 ``imanifestlog`` collection as this instance.
930 ``imanifestlog`` collection as this instance.
931 """
931 """
932
932
933 def copy():
933 def copy():
934 """Obtain a copy of this manifest instance.
934 """Obtain a copy of this manifest instance.
935
935
936 Returns an object conforming to the ``imanifestrevisionwritable``
936 Returns an object conforming to the ``imanifestrevisionwritable``
937 interface. The instance will be associated with the same
937 interface. The instance will be associated with the same
938 ``imanifestlog`` collection as this instance.
938 ``imanifestlog`` collection as this instance.
939 """
939 """
940
940
941 def read():
941 def read():
942 """Obtain the parsed manifest data structure.
942 """Obtain the parsed manifest data structure.
943
943
944 The returned object conforms to the ``imanifestdict`` interface.
944 The returned object conforms to the ``imanifestdict`` interface.
945 """
945 """
946
946
947 class imanifestrevisionstored(imanifestrevisionbase):
947 class imanifestrevisionstored(imanifestrevisionbase):
948 """Interface representing a manifest revision committed to storage."""
948 """Interface representing a manifest revision committed to storage."""
949
949
950 def node():
950 def node():
951 """The binary node for this manifest."""
951 """The binary node for this manifest."""
952
952
953 parents = interfaceutil.Attribute(
953 parents = interfaceutil.Attribute(
954 """List of binary nodes that are parents for this manifest revision."""
954 """List of binary nodes that are parents for this manifest revision."""
955 )
955 )
956
956
957 def readdelta(shallow=False):
957 def readdelta(shallow=False):
958 """Obtain the manifest data structure representing changes from parent.
958 """Obtain the manifest data structure representing changes from parent.
959
959
960 This manifest is compared to its 1st parent. A new manifest representing
960 This manifest is compared to its 1st parent. A new manifest representing
961 those differences is constructed.
961 those differences is constructed.
962
962
963 The returned object conforms to the ``imanifestdict`` interface.
963 The returned object conforms to the ``imanifestdict`` interface.
964 """
964 """
965
965
966 def readfast(shallow=False):
966 def readfast(shallow=False):
967 """Calls either ``read()`` or ``readdelta()``.
967 """Calls either ``read()`` or ``readdelta()``.
968
968
969 The faster of the two options is called.
969 The faster of the two options is called.
970 """
970 """
971
971
972 def find(key):
972 def find(key):
973 """Calls self.read().find(key)``.
973 """Calls self.read().find(key)``.
974
974
975 Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
975 Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
976 """
976 """
977
977
978 class imanifestrevisionwritable(imanifestrevisionbase):
978 class imanifestrevisionwritable(imanifestrevisionbase):
979 """Interface representing a manifest revision that can be committed."""
979 """Interface representing a manifest revision that can be committed."""
980
980
981 def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
981 def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
982 """Add this revision to storage.
982 """Add this revision to storage.
983
983
984 Takes a transaction object, the changeset revision number it will
984 Takes a transaction object, the changeset revision number it will
985 be associated with, its parent nodes, and lists of added and
985 be associated with, its parent nodes, and lists of added and
986 removed paths.
986 removed paths.
987
987
988 If match is provided, storage can choose not to inspect or write out
988 If match is provided, storage can choose not to inspect or write out
989 items that do not match. Storage is still required to be able to provide
989 items that do not match. Storage is still required to be able to provide
990 the full manifest in the future for any directories written (these
990 the full manifest in the future for any directories written (these
991 manifests should not be "narrowed on disk").
991 manifests should not be "narrowed on disk").
992
992
993 Returns the binary node of the created revision.
993 Returns the binary node of the created revision.
994 """
994 """
995
995
996 class imanifeststorage(interfaceutil.Interface):
996 class imanifeststorage(interfaceutil.Interface):
997 """Storage interface for manifest data."""
997 """Storage interface for manifest data."""
998
998
999 tree = interfaceutil.Attribute(
999 tree = interfaceutil.Attribute(
1000 """The path to the directory this manifest tracks.
1000 """The path to the directory this manifest tracks.
1001
1001
1002 The empty bytestring represents the root manifest.
1002 The empty bytestring represents the root manifest.
1003 """)
1003 """)
1004
1004
1005 index = interfaceutil.Attribute(
1005 index = interfaceutil.Attribute(
1006 """An ``ifilerevisionssequence`` instance.""")
1006 """An ``ifilerevisionssequence`` instance.""")
1007
1007
1008 indexfile = interfaceutil.Attribute(
1008 indexfile = interfaceutil.Attribute(
1009 """Path of revlog index file.
1009 """Path of revlog index file.
1010
1010
1011 TODO this is revlog specific and should not be exposed.
1011 TODO this is revlog specific and should not be exposed.
1012 """)
1012 """)
1013
1013
1014 opener = interfaceutil.Attribute(
1014 opener = interfaceutil.Attribute(
1015 """VFS opener to use to access underlying files used for storage.
1015 """VFS opener to use to access underlying files used for storage.
1016
1016
1017 TODO this is revlog specific and should not be exposed.
1017 TODO this is revlog specific and should not be exposed.
1018 """)
1018 """)
1019
1019
1020 version = interfaceutil.Attribute(
1020 version = interfaceutil.Attribute(
1021 """Revlog version number.
1021 """Revlog version number.
1022
1022
1023 TODO this is revlog specific and should not be exposed.
1023 TODO this is revlog specific and should not be exposed.
1024 """)
1024 """)
1025
1025
1026 _generaldelta = interfaceutil.Attribute(
1026 _generaldelta = interfaceutil.Attribute(
1027 """Whether generaldelta storage is being used.
1027 """Whether generaldelta storage is being used.
1028
1028
1029 TODO this is revlog specific and should not be exposed.
1029 TODO this is revlog specific and should not be exposed.
1030 """)
1030 """)
1031
1031
1032 fulltextcache = interfaceutil.Attribute(
1032 fulltextcache = interfaceutil.Attribute(
1033 """Dict with cache of fulltexts.
1033 """Dict with cache of fulltexts.
1034
1034
1035 TODO this doesn't feel appropriate for the storage interface.
1035 TODO this doesn't feel appropriate for the storage interface.
1036 """)
1036 """)
1037
1037
1038 def __len__():
1038 def __len__():
1039 """Obtain the number of revisions stored for this manifest."""
1039 """Obtain the number of revisions stored for this manifest."""
1040
1040
1041 def __iter__():
1041 def __iter__():
1042 """Iterate over revision numbers for this manifest."""
1042 """Iterate over revision numbers for this manifest."""
1043
1043
1044 def rev(node):
1044 def rev(node):
1045 """Obtain the revision number given a binary node.
1045 """Obtain the revision number given a binary node.
1046
1046
1047 Raises ``error.LookupError`` if the node is not known.
1047 Raises ``error.LookupError`` if the node is not known.
1048 """
1048 """
1049
1049
1050 def node(rev):
1050 def node(rev):
1051 """Obtain the node value given a revision number.
1051 """Obtain the node value given a revision number.
1052
1052
1053 Raises ``error.LookupError`` if the revision is not known.
1053 Raises ``error.LookupError`` if the revision is not known.
1054 """
1054 """
1055
1055
1056 def lookup(value):
1056 def lookup(value):
1057 """Attempt to resolve a value to a node.
1057 """Attempt to resolve a value to a node.
1058
1058
1059 Value can be a binary node, hex node, revision number, or a bytes
1059 Value can be a binary node, hex node, revision number, or a bytes
1060 that can be converted to an integer.
1060 that can be converted to an integer.
1061
1061
1062 Raises ``error.LookupError`` if a node could not be resolved.
1062 Raises ``error.LookupError`` if a node could not be resolved.
1063
1063
1064 TODO this is only used by debug* commands and can probably be deleted
1064 TODO this is only used by debug* commands and can probably be deleted
1065 easily.
1065 easily.
1066 """
1066 """
1067
1067
1068 def parents(node):
1068 def parents(node):
1069 """Returns a 2-tuple of parent nodes for a node.
1069 """Returns a 2-tuple of parent nodes for a node.
1070
1070
1071 Values will be ``nullid`` if the parent is empty.
1071 Values will be ``nullid`` if the parent is empty.
1072 """
1072 """
1073
1073
1074 def parentrevs(rev):
1074 def parentrevs(rev):
1075 """Like parents() but operates on revision numbers."""
1075 """Like parents() but operates on revision numbers."""
1076
1076
1077 def linkrev(rev):
1077 def linkrev(rev):
1078 """Obtain the changeset revision number a revision is linked to."""
1078 """Obtain the changeset revision number a revision is linked to."""
1079
1079
1080 def revision(node, _df=None, raw=False):
1080 def revision(node, _df=None, raw=False):
1081 """Obtain fulltext data for a node."""
1081 """Obtain fulltext data for a node."""
1082
1082
1083 def revdiff(rev1, rev2):
1083 def revdiff(rev1, rev2):
1084 """Obtain a delta between two revision numbers.
1084 """Obtain a delta between two revision numbers.
1085
1085
1086 The returned data is the result of ``bdiff.bdiff()`` on the raw
1086 The returned data is the result of ``bdiff.bdiff()`` on the raw
1087 revision data.
1087 revision data.
1088 """
1088 """
1089
1089
1090 def cmp(node, fulltext):
1090 def cmp(node, fulltext):
1091 """Compare fulltext to another revision.
1091 """Compare fulltext to another revision.
1092
1092
1093 Returns True if the fulltext is different from what is stored.
1093 Returns True if the fulltext is different from what is stored.
1094 """
1094 """
1095
1095
1096 def emitrevisiondeltas(requests):
1096 def emitrevisiondeltas(requests):
1097 """Produce ``irevisiondelta`` from ``irevisiondeltarequest``s.
1097 """Produce ``irevisiondelta`` from ``irevisiondeltarequest``s.
1098
1098
1099 See the documentation for ``ifiledata`` for more.
1099 See the documentation for ``ifiledata`` for more.
1100 """
1100 """
1101
1101
1102 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
1102 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
1103 """Process a series of deltas for storage.
1103 """Process a series of deltas for storage.
1104
1104
1105 See the documentation in ``ifilemutation`` for more.
1105 See the documentation in ``ifilemutation`` for more.
1106 """
1106 """
1107
1107
1108 def getstrippoint(minlink):
1108 def getstrippoint(minlink):
1109 """Find minimum revision that must be stripped to strip a linkrev.
1109 """Find minimum revision that must be stripped to strip a linkrev.
1110
1110
1111 See the documentation in ``ifilemutation`` for more.
1111 See the documentation in ``ifilemutation`` for more.
1112 """
1112 """
1113
1113
1114 def strip(minlink, transaction):
1114 def strip(minlink, transaction):
1115 """Remove storage of items starting at a linkrev.
1115 """Remove storage of items starting at a linkrev.
1116
1116
1117 See the documentation in ``ifilemutation`` for more.
1117 See the documentation in ``ifilemutation`` for more.
1118 """
1118 """
1119
1119
1120 def checksize():
1120 def checksize():
1121 """Obtain the expected sizes of backing files.
1121 """Obtain the expected sizes of backing files.
1122
1122
1123 TODO this is used by verify and it should not be part of the interface.
1123 TODO this is used by verify and it should not be part of the interface.
1124 """
1124 """
1125
1125
1126 def files():
1126 def files():
1127 """Obtain paths that are backing storage for this manifest.
1127 """Obtain paths that are backing storage for this manifest.
1128
1128
1129 TODO this is used by verify and there should probably be a better API
1129 TODO this is used by verify and there should probably be a better API
1130 for this functionality.
1130 for this functionality.
1131 """
1131 """
1132
1132
1133 def deltaparent(rev):
1133 def deltaparent(rev):
1134 """Obtain the revision that a revision is delta'd against.
1134 """Obtain the revision that a revision is delta'd against.
1135
1135
1136 TODO delta encoding is an implementation detail of storage and should
1136 TODO delta encoding is an implementation detail of storage and should
1137 not be exposed to the storage interface.
1137 not be exposed to the storage interface.
1138 """
1138 """
1139
1139
1140 def clone(tr, dest, **kwargs):
1140 def clone(tr, dest, **kwargs):
1141 """Clone this instance to another."""
1141 """Clone this instance to another."""
1142
1142
1143 def clearcaches(clear_persisted_data=False):
1143 def clearcaches(clear_persisted_data=False):
1144 """Clear any caches associated with this instance."""
1144 """Clear any caches associated with this instance."""
1145
1145
1146 def dirlog(d):
1146 def dirlog(d):
1147 """Obtain a manifest storage instance for a tree."""
1147 """Obtain a manifest storage instance for a tree."""
1148
1148
1149 def add(m, transaction, link, p1, p2, added, removed, readtree=None,
1149 def add(m, transaction, link, p1, p2, added, removed, readtree=None,
1150 match=None):
1150 match=None):
1151 """Add a revision to storage.
1151 """Add a revision to storage.
1152
1152
1153 ``m`` is an object conforming to ``imanifestdict``.
1153 ``m`` is an object conforming to ``imanifestdict``.
1154
1154
1155 ``link`` is the linkrev revision number.
1155 ``link`` is the linkrev revision number.
1156
1156
1157 ``p1`` and ``p2`` are the parent revision numbers.
1157 ``p1`` and ``p2`` are the parent revision numbers.
1158
1158
1159 ``added`` and ``removed`` are iterables of added and removed paths,
1159 ``added`` and ``removed`` are iterables of added and removed paths,
1160 respectively.
1160 respectively.
1161
1161
1162 ``readtree`` is a function that can be used to read the child tree(s)
1162 ``readtree`` is a function that can be used to read the child tree(s)
1163 when recursively writing the full tree structure when using
1163 when recursively writing the full tree structure when using
1164 treemanifests.
1164 treemanifests.
1165
1165
1166 ``match`` is a matcher that can be used to hint to storage that not all
1166 ``match`` is a matcher that can be used to hint to storage that not all
1167 paths must be inspected; this is an optimization and can be safely
1167 paths must be inspected; this is an optimization and can be safely
1168 ignored. Note that the storage must still be able to reproduce a full
1168 ignored. Note that the storage must still be able to reproduce a full
1169 manifest including files that did not match.
1169 manifest including files that did not match.
1170 """
1170 """
1171
1171
1172 class imanifestlog(interfaceutil.Interface):
1172 class imanifestlog(interfaceutil.Interface):
1173 """Interface representing a collection of manifest snapshots.
1173 """Interface representing a collection of manifest snapshots.
1174
1174
1175 Represents the root manifest in a repository.
1175 Represents the root manifest in a repository.
1176
1176
1177 Also serves as a means to access nested tree manifests and to cache
1177 Also serves as a means to access nested tree manifests and to cache
1178 tree manifests.
1178 tree manifests.
1179 """
1179 """
1180
1180
1181 def __getitem__(node):
1181 def __getitem__(node):
1182 """Obtain a manifest instance for a given binary node.
1182 """Obtain a manifest instance for a given binary node.
1183
1183
1184 Equivalent to calling ``self.get('', node)``.
1184 Equivalent to calling ``self.get('', node)``.
1185
1185
1186 The returned object conforms to the ``imanifestrevisionstored``
1186 The returned object conforms to the ``imanifestrevisionstored``
1187 interface.
1187 interface.
1188 """
1188 """
1189
1189
1190 def get(tree, node, verify=True):
1190 def get(tree, node, verify=True):
1191 """Retrieve the manifest instance for a given directory and binary node.
1191 """Retrieve the manifest instance for a given directory and binary node.
1192
1192
1193 ``node`` always refers to the node of the root manifest (which will be
1193 ``node`` always refers to the node of the root manifest (which will be
1194 the only manifest if flat manifests are being used).
1194 the only manifest if flat manifests are being used).
1195
1195
1196 If ``tree`` is the empty string, the root manifest is returned.
1196 If ``tree`` is the empty string, the root manifest is returned.
1197 Otherwise the manifest for the specified directory will be returned
1197 Otherwise the manifest for the specified directory will be returned
1198 (requires tree manifests).
1198 (requires tree manifests).
1199
1199
1200 If ``verify`` is True, ``LookupError`` is raised if the node is not
1200 If ``verify`` is True, ``LookupError`` is raised if the node is not
1201 known.
1201 known.
1202
1202
1203 The returned object conforms to the ``imanifestrevisionstored``
1203 The returned object conforms to the ``imanifestrevisionstored``
1204 interface.
1204 interface.
1205 """
1205 """
1206
1206
1207 def getstorage(tree):
1207 def getstorage(tree):
1208 """Retrieve an interface to storage for a particular tree.
1208 """Retrieve an interface to storage for a particular tree.
1209
1209
1210 If ``tree`` is the empty bytestring, storage for the root manifest will
1210 If ``tree`` is the empty bytestring, storage for the root manifest will
1211 be returned. Otherwise storage for a tree manifest is returned.
1211 be returned. Otherwise storage for a tree manifest is returned.
1212
1212
1213 TODO formalize interface for returned object.
1213 TODO formalize interface for returned object.
1214 """
1214 """
1215
1215
1216 def clearcaches():
1216 def clearcaches():
1217 """Clear caches associated with this collection."""
1217 """Clear caches associated with this collection."""
1218
1218
1219 def rev(node):
1219 def rev(node):
1220 """Obtain the revision number for a binary node.
1220 """Obtain the revision number for a binary node.
1221
1221
1222 Raises ``error.LookupError`` if the node is not known.
1222 Raises ``error.LookupError`` if the node is not known.
1223 """
1223 """
1224
1224
1225 class ilocalrepositoryfilestorage(interfaceutil.Interface):
1225 class ilocalrepositoryfilestorage(interfaceutil.Interface):
1226 """Local repository sub-interface providing access to tracked file storage.
1226 """Local repository sub-interface providing access to tracked file storage.
1227
1227
1228 This interface defines how a repository accesses storage for a single
1228 This interface defines how a repository accesses storage for a single
1229 tracked file path.
1229 tracked file path.
1230 """
1230 """
1231
1231
1232 def file(f):
1232 def file(f):
1233 """Obtain a filelog for a tracked path.
1233 """Obtain a filelog for a tracked path.
1234
1234
1235 The returned type conforms to the ``ifilestorage`` interface.
1235 The returned type conforms to the ``ifilestorage`` interface.
1236 """
1236 """
1237
1237
1238 class ilocalrepositorymain(interfaceutil.Interface):
1238 class ilocalrepositorymain(interfaceutil.Interface):
1239 """Main interface for local repositories.
1239 """Main interface for local repositories.
1240
1240
1241 This currently captures the reality of things - not how things should be.
1241 This currently captures the reality of things - not how things should be.
1242 """
1242 """
1243
1243
1244 supportedformats = interfaceutil.Attribute(
1244 supportedformats = interfaceutil.Attribute(
1245 """Set of requirements that apply to stream clone.
1245 """Set of requirements that apply to stream clone.
1246
1246
1247 This is actually a class attribute and is shared among all instances.
1247 This is actually a class attribute and is shared among all instances.
1248 """)
1248 """)
1249
1249
1250 supported = interfaceutil.Attribute(
1250 supported = interfaceutil.Attribute(
1251 """Set of requirements that this repo is capable of opening.""")
1251 """Set of requirements that this repo is capable of opening.""")
1252
1252
1253 requirements = interfaceutil.Attribute(
1253 requirements = interfaceutil.Attribute(
1254 """Set of requirements this repo uses.""")
1254 """Set of requirements this repo uses.""")
1255
1255
1256 filtername = interfaceutil.Attribute(
1256 filtername = interfaceutil.Attribute(
1257 """Name of the repoview that is active on this repo.""")
1257 """Name of the repoview that is active on this repo.""")
1258
1258
1259 wvfs = interfaceutil.Attribute(
1259 wvfs = interfaceutil.Attribute(
1260 """VFS used to access the working directory.""")
1260 """VFS used to access the working directory.""")
1261
1261
1262 vfs = interfaceutil.Attribute(
1262 vfs = interfaceutil.Attribute(
1263 """VFS rooted at the .hg directory.
1263 """VFS rooted at the .hg directory.
1264
1264
1265 Used to access repository data not in the store.
1265 Used to access repository data not in the store.
1266 """)
1266 """)
1267
1267
1268 svfs = interfaceutil.Attribute(
1268 svfs = interfaceutil.Attribute(
1269 """VFS rooted at the store.
1269 """VFS rooted at the store.
1270
1270
1271 Used to access repository data in the store. Typically .hg/store.
1271 Used to access repository data in the store. Typically .hg/store.
1272 But can point elsewhere if the store is shared.
1272 But can point elsewhere if the store is shared.
1273 """)
1273 """)
1274
1274
1275 root = interfaceutil.Attribute(
1275 root = interfaceutil.Attribute(
1276 """Path to the root of the working directory.""")
1276 """Path to the root of the working directory.""")
1277
1277
1278 path = interfaceutil.Attribute(
1278 path = interfaceutil.Attribute(
1279 """Path to the .hg directory.""")
1279 """Path to the .hg directory.""")
1280
1280
1281 origroot = interfaceutil.Attribute(
1281 origroot = interfaceutil.Attribute(
1282 """The filesystem path that was used to construct the repo.""")
1282 """The filesystem path that was used to construct the repo.""")
1283
1283
1284 auditor = interfaceutil.Attribute(
1284 auditor = interfaceutil.Attribute(
1285 """A pathauditor for the working directory.
1285 """A pathauditor for the working directory.
1286
1286
1287 This checks if a path refers to a nested repository.
1287 This checks if a path refers to a nested repository.
1288
1288
1289 Operates on the filesystem.
1289 Operates on the filesystem.
1290 """)
1290 """)
1291
1291
1292 nofsauditor = interfaceutil.Attribute(
1292 nofsauditor = interfaceutil.Attribute(
1293 """A pathauditor for the working directory.
1293 """A pathauditor for the working directory.
1294
1294
1295 This is like ``auditor`` except it doesn't do filesystem checks.
1295 This is like ``auditor`` except it doesn't do filesystem checks.
1296 """)
1296 """)
1297
1297
1298 baseui = interfaceutil.Attribute(
1298 baseui = interfaceutil.Attribute(
1299 """Original ui instance passed into constructor.""")
1299 """Original ui instance passed into constructor.""")
1300
1300
1301 ui = interfaceutil.Attribute(
1301 ui = interfaceutil.Attribute(
1302 """Main ui instance for this instance.""")
1302 """Main ui instance for this instance.""")
1303
1303
1304 sharedpath = interfaceutil.Attribute(
1304 sharedpath = interfaceutil.Attribute(
1305 """Path to the .hg directory of the repo this repo was shared from.""")
1305 """Path to the .hg directory of the repo this repo was shared from.""")
1306
1306
1307 store = interfaceutil.Attribute(
1307 store = interfaceutil.Attribute(
1308 """A store instance.""")
1308 """A store instance.""")
1309
1309
1310 spath = interfaceutil.Attribute(
1310 spath = interfaceutil.Attribute(
1311 """Path to the store.""")
1311 """Path to the store.""")
1312
1312
1313 sjoin = interfaceutil.Attribute(
1313 sjoin = interfaceutil.Attribute(
1314 """Alias to self.store.join.""")
1314 """Alias to self.store.join.""")
1315
1315
1316 cachevfs = interfaceutil.Attribute(
1316 cachevfs = interfaceutil.Attribute(
1317 """A VFS used to access the cache directory.
1317 """A VFS used to access the cache directory.
1318
1318
1319 Typically .hg/cache.
1319 Typically .hg/cache.
1320 """)
1320 """)
1321
1321
1322 filteredrevcache = interfaceutil.Attribute(
1322 filteredrevcache = interfaceutil.Attribute(
1323 """Holds sets of revisions to be filtered.""")
1323 """Holds sets of revisions to be filtered.""")
1324
1324
1325 names = interfaceutil.Attribute(
1325 names = interfaceutil.Attribute(
1326 """A ``namespaces`` instance.""")
1326 """A ``namespaces`` instance.""")
1327
1327
1328 def close():
1328 def close():
1329 """Close the handle on this repository."""
1329 """Close the handle on this repository."""
1330
1330
1331 def peer():
1331 def peer():
1332 """Obtain an object conforming to the ``peer`` interface."""
1332 """Obtain an object conforming to the ``peer`` interface."""
1333
1333
1334 def unfiltered():
1334 def unfiltered():
1335 """Obtain an unfiltered/raw view of this repo."""
1335 """Obtain an unfiltered/raw view of this repo."""
1336
1336
1337 def filtered(name, visibilityexceptions=None):
1337 def filtered(name, visibilityexceptions=None):
1338 """Obtain a named view of this repository."""
1338 """Obtain a named view of this repository."""
1339
1339
1340 obsstore = interfaceutil.Attribute(
1340 obsstore = interfaceutil.Attribute(
1341 """A store of obsolescence data.""")
1341 """A store of obsolescence data.""")
1342
1342
1343 changelog = interfaceutil.Attribute(
1343 changelog = interfaceutil.Attribute(
1344 """A handle on the changelog revlog.""")
1344 """A handle on the changelog revlog.""")
1345
1345
1346 manifestlog = interfaceutil.Attribute(
1346 manifestlog = interfaceutil.Attribute(
1347 """An instance conforming to the ``imanifestlog`` interface.
1347 """An instance conforming to the ``imanifestlog`` interface.
1348
1348
1349 Provides access to manifests for the repository.
1349 Provides access to manifests for the repository.
1350 """)
1350 """)
1351
1351
1352 dirstate = interfaceutil.Attribute(
1352 dirstate = interfaceutil.Attribute(
1353 """Working directory state.""")
1353 """Working directory state.""")
1354
1354
1355 narrowpats = interfaceutil.Attribute(
1355 narrowpats = interfaceutil.Attribute(
1356 """Matcher patterns for this repository's narrowspec.""")
1356 """Matcher patterns for this repository's narrowspec.""")
1357
1357
1358 def narrowmatch():
1358 def narrowmatch():
1359 """Obtain a matcher for the narrowspec."""
1359 """Obtain a matcher for the narrowspec."""
1360
1360
1361 def setnarrowpats(newincludes, newexcludes):
1361 def setnarrowpats(newincludes, newexcludes):
1362 """Define the narrowspec for this repository."""
1362 """Define the narrowspec for this repository."""
1363
1363
1364 def __getitem__(changeid):
1364 def __getitem__(changeid):
1365 """Try to resolve a changectx."""
1365 """Try to resolve a changectx."""
1366
1366
1367 def __contains__(changeid):
1367 def __contains__(changeid):
1368 """Whether a changeset exists."""
1368 """Whether a changeset exists."""
1369
1369
1370 def __nonzero__():
1370 def __nonzero__():
1371 """Always returns True."""
1371 """Always returns True."""
1372 return True
1372 return True
1373
1373
1374 __bool__ = __nonzero__
1374 __bool__ = __nonzero__
1375
1375
1376 def __len__():
1376 def __len__():
1377 """Returns the number of changesets in the repo."""
1377 """Returns the number of changesets in the repo."""
1378
1378
1379 def __iter__():
1379 def __iter__():
1380 """Iterate over revisions in the changelog."""
1380 """Iterate over revisions in the changelog."""
1381
1381
1382 def revs(expr, *args):
1382 def revs(expr, *args):
1383 """Evaluate a revset.
1383 """Evaluate a revset.
1384
1384
1385 Emits revisions.
1385 Emits revisions.
1386 """
1386 """
1387
1387
1388 def set(expr, *args):
1388 def set(expr, *args):
1389 """Evaluate a revset.
1389 """Evaluate a revset.
1390
1390
1391 Emits changectx instances.
1391 Emits changectx instances.
1392 """
1392 """
1393
1393
1394 def anyrevs(specs, user=False, localalias=None):
1394 def anyrevs(specs, user=False, localalias=None):
1395 """Find revisions matching one of the given revsets."""
1395 """Find revisions matching one of the given revsets."""
1396
1396
1397 def url():
1397 def url():
1398 """Returns a string representing the location of this repo."""
1398 """Returns a string representing the location of this repo."""
1399
1399
1400 def hook(name, throw=False, **args):
1400 def hook(name, throw=False, **args):
1401 """Call a hook."""
1401 """Call a hook."""
1402
1402
1403 def tags():
1403 def tags():
1404 """Return a mapping of tag to node."""
1404 """Return a mapping of tag to node."""
1405
1405
1406 def tagtype(tagname):
1406 def tagtype(tagname):
1407 """Return the type of a given tag."""
1407 """Return the type of a given tag."""
1408
1408
1409 def tagslist():
1409 def tagslist():
1410 """Return a list of tags ordered by revision."""
1410 """Return a list of tags ordered by revision."""
1411
1411
1412 def nodetags(node):
1412 def nodetags(node):
1413 """Return the tags associated with a node."""
1413 """Return the tags associated with a node."""
1414
1414
1415 def nodebookmarks(node):
1415 def nodebookmarks(node):
1416 """Return the list of bookmarks pointing to the specified node."""
1416 """Return the list of bookmarks pointing to the specified node."""
1417
1417
1418 def branchmap():
1418 def branchmap():
1419 """Return a mapping of branch to heads in that branch."""
1419 """Return a mapping of branch to heads in that branch."""
1420
1420
1421 def revbranchcache():
1421 def revbranchcache():
1422 pass
1422 pass
1423
1423
1424 def branchtip(branchtip, ignoremissing=False):
1424 def branchtip(branchtip, ignoremissing=False):
1425 """Return the tip node for a given branch."""
1425 """Return the tip node for a given branch."""
1426
1426
1427 def lookup(key):
1427 def lookup(key):
1428 """Resolve the node for a revision."""
1428 """Resolve the node for a revision."""
1429
1429
1430 def lookupbranch(key):
1430 def lookupbranch(key):
1431 """Look up the branch name of the given revision or branch name."""
1431 """Look up the branch name of the given revision or branch name."""
1432
1432
1433 def known(nodes):
1433 def known(nodes):
1434 """Determine whether a series of nodes is known.
1434 """Determine whether a series of nodes is known.
1435
1435
1436 Returns a list of bools.
1436 Returns a list of bools.
1437 """
1437 """
1438
1438
1439 def local():
1439 def local():
1440 """Whether the repository is local."""
1440 """Whether the repository is local."""
1441 return True
1441 return True
1442
1442
1443 def publishing():
1443 def publishing():
1444 """Whether the repository is a publishing repository."""
1444 """Whether the repository is a publishing repository."""
1445
1445
1446 def cancopy():
1446 def cancopy():
1447 pass
1447 pass
1448
1448
1449 def shared():
1449 def shared():
1450 """The type of shared repository or None."""
1450 """The type of shared repository or None."""
1451
1451
1452 def wjoin(f, *insidef):
1452 def wjoin(f, *insidef):
1453 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
1453 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
1454
1454
1455 def setparents(p1, p2):
1455 def setparents(p1, p2):
1456 """Set the parent nodes of the working directory."""
1456 """Set the parent nodes of the working directory."""
1457
1457
1458 def filectx(path, changeid=None, fileid=None):
1458 def filectx(path, changeid=None, fileid=None):
1459 """Obtain a filectx for the given file revision."""
1459 """Obtain a filectx for the given file revision."""
1460
1460
1461 def getcwd():
1461 def getcwd():
1462 """Obtain the current working directory from the dirstate."""
1462 """Obtain the current working directory from the dirstate."""
1463
1463
1464 def pathto(f, cwd=None):
1464 def pathto(f, cwd=None):
1465 """Obtain the relative path to a file."""
1465 """Obtain the relative path to a file."""
1466
1466
1467 def adddatafilter(name, fltr):
1467 def adddatafilter(name, fltr):
1468 pass
1468 pass
1469
1469
1470 def wread(filename):
1470 def wread(filename):
1471 """Read a file from wvfs, using data filters."""
1471 """Read a file from wvfs, using data filters."""
1472
1472
1473 def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
1473 def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
1474 """Write data to a file in the wvfs, using data filters."""
1474 """Write data to a file in the wvfs, using data filters."""
1475
1475
1476 def wwritedata(filename, data):
1476 def wwritedata(filename, data):
1477 """Resolve data for writing to the wvfs, using data filters."""
1477 """Resolve data for writing to the wvfs, using data filters."""
1478
1478
1479 def currenttransaction():
1479 def currenttransaction():
1480 """Obtain the current transaction instance or None."""
1480 """Obtain the current transaction instance or None."""
1481
1481
1482 def transaction(desc, report=None):
1482 def transaction(desc, report=None):
1483 """Open a new transaction to write to the repository."""
1483 """Open a new transaction to write to the repository."""
1484
1484
1485 def undofiles():
1485 def undofiles():
1486 """Returns a list of (vfs, path) for files to undo transactions."""
1486 """Returns a list of (vfs, path) for files to undo transactions."""
1487
1487
1488 def recover():
1488 def recover():
1489 """Roll back an interrupted transaction."""
1489 """Roll back an interrupted transaction."""
1490
1490
1491 def rollback(dryrun=False, force=False):
1491 def rollback(dryrun=False, force=False):
1492 """Undo the last transaction.
1492 """Undo the last transaction.
1493
1493
1494 DANGEROUS.
1494 DANGEROUS.
1495 """
1495 """
1496
1496
1497 def updatecaches(tr=None, full=False):
1497 def updatecaches(tr=None, full=False):
1498 """Warm repo caches."""
1498 """Warm repo caches."""
1499
1499
1500 def invalidatecaches():
1500 def invalidatecaches():
1501 """Invalidate cached data due to the repository mutating."""
1501 """Invalidate cached data due to the repository mutating."""
1502
1502
1503 def invalidatevolatilesets():
1503 def invalidatevolatilesets():
1504 pass
1504 pass
1505
1505
1506 def invalidatedirstate():
1506 def invalidatedirstate():
1507 """Invalidate the dirstate."""
1507 """Invalidate the dirstate."""
1508
1508
1509 def invalidate(clearfilecache=False):
1509 def invalidate(clearfilecache=False):
1510 pass
1510 pass
1511
1511
1512 def invalidateall():
1512 def invalidateall():
1513 pass
1513 pass
1514
1514
1515 def lock(wait=True):
1515 def lock(wait=True):
1516 """Lock the repository store and return a lock instance."""
1516 """Lock the repository store and return a lock instance."""
1517
1517
1518 def wlock(wait=True):
1518 def wlock(wait=True):
1519 """Lock the non-store parts of the repository."""
1519 """Lock the non-store parts of the repository."""
1520
1520
1521 def currentwlock():
1521 def currentwlock():
1522 """Return the wlock if it's held or None."""
1522 """Return the wlock if it's held or None."""
1523
1523
1524 def checkcommitpatterns(wctx, vdirs, match, status, fail):
1524 def checkcommitpatterns(wctx, vdirs, match, status, fail):
1525 pass
1525 pass
1526
1526
1527 def commit(text='', user=None, date=None, match=None, force=False,
1527 def commit(text='', user=None, date=None, match=None, force=False,
1528 editor=False, extra=None):
1528 editor=False, extra=None):
1529 """Add a new revision to the repository."""
1529 """Add a new revision to the repository."""
1530
1530
1531 def commitctx(ctx, error=False):
1531 def commitctx(ctx, error=False):
1532 """Commit a commitctx instance to the repository."""
1532 """Commit a commitctx instance to the repository."""
1533
1533
1534 def destroying():
1534 def destroying():
1535 """Inform the repository that nodes are about to be destroyed."""
1535 """Inform the repository that nodes are about to be destroyed."""
1536
1536
1537 def destroyed():
1537 def destroyed():
1538 """Inform the repository that nodes have been destroyed."""
1538 """Inform the repository that nodes have been destroyed."""
1539
1539
1540 def status(node1='.', node2=None, match=None, ignored=False,
1540 def status(node1='.', node2=None, match=None, ignored=False,
1541 clean=False, unknown=False, listsubrepos=False):
1541 clean=False, unknown=False, listsubrepos=False):
1542 """Convenience method to call repo[x].status()."""
1542 """Convenience method to call repo[x].status()."""
1543
1543
1544 def addpostdsstatus(ps):
1544 def addpostdsstatus(ps):
1545 pass
1545 pass
1546
1546
1547 def postdsstatus():
1547 def postdsstatus():
1548 pass
1548 pass
1549
1549
1550 def clearpostdsstatus():
1550 def clearpostdsstatus():
1551 pass
1551 pass
1552
1552
1553 def heads(start=None):
1553 def heads(start=None):
1554 """Obtain list of nodes that are DAG heads."""
1554 """Obtain list of nodes that are DAG heads."""
1555
1555
1556 def branchheads(branch=None, start=None, closed=False):
1556 def branchheads(branch=None, start=None, closed=False):
1557 pass
1557 pass
1558
1558
1559 def branches(nodes):
1559 def branches(nodes):
1560 pass
1560 pass
1561
1561
1562 def between(pairs):
1562 def between(pairs):
1563 pass
1563 pass
1564
1564
1565 def checkpush(pushop):
1565 def checkpush(pushop):
1566 pass
1566 pass
1567
1567
1568 prepushoutgoinghooks = interfaceutil.Attribute(
1568 prepushoutgoinghooks = interfaceutil.Attribute(
1569 """util.hooks instance.""")
1569 """util.hooks instance.""")
1570
1570
1571 def pushkey(namespace, key, old, new):
1571 def pushkey(namespace, key, old, new):
1572 pass
1572 pass
1573
1573
1574 def listkeys(namespace):
1574 def listkeys(namespace):
1575 pass
1575 pass
1576
1576
1577 def debugwireargs(one, two, three=None, four=None, five=None):
1577 def debugwireargs(one, two, three=None, four=None, five=None):
1578 pass
1578 pass
1579
1579
1580 def savecommitmessage(text):
1580 def savecommitmessage(text):
1581 pass
1581 pass
1582
1582
1583 class completelocalrepository(ilocalrepositorymain,
1583 class completelocalrepository(ilocalrepositorymain,
1584 ilocalrepositoryfilestorage):
1584 ilocalrepositoryfilestorage):
1585 """Complete interface for a local repository."""
1585 """Complete interface for a local repository."""
@@ -1,984 +1,984
1 # storage.py - Testing of storage primitives.
1 # storage.py - Testing of storage primitives.
2 #
2 #
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import unittest
10 import unittest
11
11
12 from ..node import (
12 from ..node import (
13 hex,
13 hex,
14 nullid,
14 nullid,
15 nullrev,
15 nullrev,
16 )
16 )
17 from .. import (
17 from .. import (
18 error,
18 error,
19 mdiff,
19 mdiff,
20 revlog,
20 revlog,
21 )
21 )
22
22
23 class basetestcase(unittest.TestCase):
23 class basetestcase(unittest.TestCase):
24 if not getattr(unittest.TestCase, r'assertRaisesRegex', False):
24 if not getattr(unittest.TestCase, r'assertRaisesRegex', False):
25 assertRaisesRegex = (# camelcase-required
25 assertRaisesRegex = (# camelcase-required
26 unittest.TestCase.assertRaisesRegexp)
26 unittest.TestCase.assertRaisesRegexp)
27
27
28 class revisiondeltarequest(object):
28 class revisiondeltarequest(object):
29 def __init__(self, node, p1, p2, linknode, basenode, ellipsis):
29 def __init__(self, node, p1, p2, linknode, basenode, ellipsis):
30 self.node = node
30 self.node = node
31 self.p1node = p1
31 self.p1node = p1
32 self.p2node = p2
32 self.p2node = p2
33 self.linknode = linknode
33 self.linknode = linknode
34 self.basenode = basenode
34 self.basenode = basenode
35 self.ellipsis = ellipsis
35 self.ellipsis = ellipsis
36
36
37 class ifileindextests(basetestcase):
37 class ifileindextests(basetestcase):
38 """Generic tests for the ifileindex interface.
38 """Generic tests for the ifileindex interface.
39
39
40 All file storage backends for index data should conform to the tests in this
40 All file storage backends for index data should conform to the tests in this
41 class.
41 class.
42
42
43 Use ``makeifileindextests()`` to create an instance of this type.
43 Use ``makeifileindextests()`` to create an instance of this type.
44 """
44 """
45 def testempty(self):
45 def testempty(self):
46 f = self._makefilefn()
46 f = self._makefilefn()
47 self.assertEqual(len(f), 0, 'new file store has 0 length by default')
47 self.assertEqual(len(f), 0, 'new file store has 0 length by default')
48 self.assertEqual(list(f), [], 'iter yields nothing by default')
48 self.assertEqual(list(f), [], 'iter yields nothing by default')
49
49
50 gen = iter(f)
50 gen = iter(f)
51 with self.assertRaises(StopIteration):
51 with self.assertRaises(StopIteration):
52 next(gen)
52 next(gen)
53
53
54 # revs() should evaluate to an empty list.
54 # revs() should evaluate to an empty list.
55 self.assertEqual(list(f.revs()), [])
55 self.assertEqual(list(f.revs()), [])
56
56
57 revs = iter(f.revs())
57 revs = iter(f.revs())
58 with self.assertRaises(StopIteration):
58 with self.assertRaises(StopIteration):
59 next(revs)
59 next(revs)
60
60
61 self.assertEqual(list(f.revs(start=20)), [])
61 self.assertEqual(list(f.revs(start=20)), [])
62
62
63 # parents() and parentrevs() work with nullid/nullrev.
63 # parents() and parentrevs() work with nullid/nullrev.
64 self.assertEqual(f.parents(nullid), (nullid, nullid))
64 self.assertEqual(f.parents(nullid), (nullid, nullid))
65 self.assertEqual(f.parentrevs(nullrev), (nullrev, nullrev))
65 self.assertEqual(f.parentrevs(nullrev), (nullrev, nullrev))
66
66
67 with self.assertRaises(error.LookupError):
67 with self.assertRaises(error.LookupError):
68 f.parents(b'\x01' * 20)
68 f.parents(b'\x01' * 20)
69
69
70 for i in range(-5, 5):
70 for i in range(-5, 5):
71 if i == nullrev:
71 if i == nullrev:
72 continue
72 continue
73
73
74 with self.assertRaises(IndexError):
74 with self.assertRaises(IndexError):
75 f.parentrevs(i)
75 f.parentrevs(i)
76
76
77 # nullid/nullrev lookup always works.
77 # nullid/nullrev lookup always works.
78 self.assertEqual(f.rev(nullid), nullrev)
78 self.assertEqual(f.rev(nullid), nullrev)
79 self.assertEqual(f.node(nullrev), nullid)
79 self.assertEqual(f.node(nullrev), nullid)
80
80
81 with self.assertRaises(error.LookupError):
81 with self.assertRaises(error.LookupError):
82 f.rev(b'\x01' * 20)
82 f.rev(b'\x01' * 20)
83
83
84 for i in range(-5, 5):
84 for i in range(-5, 5):
85 if i == nullrev:
85 if i == nullrev:
86 continue
86 continue
87
87
88 with self.assertRaises(IndexError):
88 with self.assertRaises(IndexError):
89 f.node(i)
89 f.node(i)
90
90
91 self.assertEqual(f.lookup(nullid), nullid)
91 self.assertEqual(f.lookup(nullid), nullid)
92 self.assertEqual(f.lookup(nullrev), nullid)
92 self.assertEqual(f.lookup(nullrev), nullid)
93 self.assertEqual(f.lookup(hex(nullid)), nullid)
93 self.assertEqual(f.lookup(hex(nullid)), nullid)
94
94
95 # String converted to integer doesn't work for nullrev.
95 # String converted to integer doesn't work for nullrev.
96 with self.assertRaises(error.LookupError):
96 with self.assertRaises(error.LookupError):
97 f.lookup(b'%d' % nullrev)
97 f.lookup(b'%d' % nullrev)
98
98
99 self.assertEqual(f.linkrev(nullrev), nullrev)
99 self.assertEqual(f.linkrev(nullrev), nullrev)
100
100
101 for i in range(-5, 5):
101 for i in range(-5, 5):
102 if i == nullrev:
102 if i == nullrev:
103 continue
103 continue
104
104
105 with self.assertRaises(IndexError):
105 with self.assertRaises(IndexError):
106 f.linkrev(i)
106 f.linkrev(i)
107
107
108 self.assertEqual(f.flags(nullrev), 0)
108 self.assertEqual(f.flags(nullrev), 0)
109
109
110 for i in range(-5, 5):
110 for i in range(-5, 5):
111 if i == nullrev:
111 if i == nullrev:
112 continue
112 continue
113
113
114 with self.assertRaises(IndexError):
114 with self.assertRaises(IndexError):
115 f.flags(i)
115 f.flags(i)
116
116
117 self.assertFalse(f.iscensored(nullrev))
117 self.assertFalse(f.iscensored(nullrev))
118
118
119 for i in range(-5, 5):
119 for i in range(-5, 5):
120 if i == nullrev:
120 if i == nullrev:
121 continue
121 continue
122
122
123 with self.assertRaises(IndexError):
123 with self.assertRaises(IndexError):
124 f.iscensored(i)
124 f.iscensored(i)
125
125
126 self.assertEqual(list(f.commonancestorsheads(nullid, nullid)), [])
126 self.assertEqual(list(f.commonancestorsheads(nullid, nullid)), [])
127
127
128 with self.assertRaises(ValueError):
128 with self.assertRaises(ValueError):
129 self.assertEqual(list(f.descendants([])), [])
129 self.assertEqual(list(f.descendants([])), [])
130
130
131 self.assertEqual(list(f.descendants([nullrev])), [])
131 self.assertEqual(list(f.descendants([nullrev])), [])
132
132
133 self.assertEqual(f.headrevs(), [nullrev])
133 self.assertEqual(f.headrevs(), [nullrev])
134 self.assertEqual(f.heads(), [nullid])
134 self.assertEqual(f.heads(), [nullid])
135 self.assertEqual(f.heads(nullid), [nullid])
135 self.assertEqual(f.heads(nullid), [nullid])
136 self.assertEqual(f.heads(None, [nullid]), [nullid])
136 self.assertEqual(f.heads(None, [nullid]), [nullid])
137 self.assertEqual(f.heads(nullid, [nullid]), [nullid])
137 self.assertEqual(f.heads(nullid, [nullid]), [nullid])
138
138
139 self.assertEqual(f.children(nullid), [])
139 self.assertEqual(f.children(nullid), [])
140
140
141 with self.assertRaises(error.LookupError):
141 with self.assertRaises(error.LookupError):
142 f.children(b'\x01' * 20)
142 f.children(b'\x01' * 20)
143
143
144 self.assertEqual(f.deltaparent(nullrev), nullrev)
144 self.assertEqual(f.deltaparent(nullrev), nullrev)
145
145
146 for i in range(-5, 5):
146 for i in range(-5, 5):
147 if i == nullrev:
147 if i == nullrev:
148 continue
148 continue
149
149
150 with self.assertRaises(IndexError):
150 with self.assertRaises(IndexError):
151 f.deltaparent(i)
151 f.deltaparent(i)
152
152
153 def testsinglerevision(self):
153 def testsinglerevision(self):
154 f = self._makefilefn()
154 f = self._makefilefn()
155 with self._maketransactionfn() as tr:
155 with self._maketransactionfn() as tr:
156 node = f.add(b'initial', None, tr, 0, nullid, nullid)
156 node = f.add(b'initial', None, tr, 0, nullid, nullid)
157
157
158 self.assertEqual(len(f), 1)
158 self.assertEqual(len(f), 1)
159 self.assertEqual(list(f), [0])
159 self.assertEqual(list(f), [0])
160
160
161 gen = iter(f)
161 gen = iter(f)
162 self.assertEqual(next(gen), 0)
162 self.assertEqual(next(gen), 0)
163
163
164 with self.assertRaises(StopIteration):
164 with self.assertRaises(StopIteration):
165 next(gen)
165 next(gen)
166
166
167 self.assertEqual(list(f.revs()), [0])
167 self.assertEqual(list(f.revs()), [0])
168 self.assertEqual(list(f.revs(start=1)), [])
168 self.assertEqual(list(f.revs(start=1)), [])
169 self.assertEqual(list(f.revs(start=0)), [0])
169 self.assertEqual(list(f.revs(start=0)), [0])
170 self.assertEqual(list(f.revs(stop=0)), [0])
170 self.assertEqual(list(f.revs(stop=0)), [0])
171 self.assertEqual(list(f.revs(stop=1)), [0])
171 self.assertEqual(list(f.revs(stop=1)), [0])
172 self.assertEqual(list(f.revs(1, 1)), [])
172 self.assertEqual(list(f.revs(1, 1)), [])
173 # TODO buggy
173 # TODO buggy
174 self.assertEqual(list(f.revs(1, 0)), [1, 0])
174 self.assertEqual(list(f.revs(1, 0)), [1, 0])
175 self.assertEqual(list(f.revs(2, 0)), [2, 1, 0])
175 self.assertEqual(list(f.revs(2, 0)), [2, 1, 0])
176
176
177 self.assertEqual(f.parents(node), (nullid, nullid))
177 self.assertEqual(f.parents(node), (nullid, nullid))
178 self.assertEqual(f.parentrevs(0), (nullrev, nullrev))
178 self.assertEqual(f.parentrevs(0), (nullrev, nullrev))
179
179
180 with self.assertRaises(error.LookupError):
180 with self.assertRaises(error.LookupError):
181 f.parents(b'\x01' * 20)
181 f.parents(b'\x01' * 20)
182
182
183 with self.assertRaises(IndexError):
183 with self.assertRaises(IndexError):
184 f.parentrevs(1)
184 f.parentrevs(1)
185
185
186 self.assertEqual(f.rev(node), 0)
186 self.assertEqual(f.rev(node), 0)
187
187
188 with self.assertRaises(error.LookupError):
188 with self.assertRaises(error.LookupError):
189 f.rev(b'\x01' * 20)
189 f.rev(b'\x01' * 20)
190
190
191 self.assertEqual(f.node(0), node)
191 self.assertEqual(f.node(0), node)
192
192
193 with self.assertRaises(IndexError):
193 with self.assertRaises(IndexError):
194 f.node(1)
194 f.node(1)
195
195
196 self.assertEqual(f.lookup(node), node)
196 self.assertEqual(f.lookup(node), node)
197 self.assertEqual(f.lookup(0), node)
197 self.assertEqual(f.lookup(0), node)
198 self.assertEqual(f.lookup(b'0'), node)
198 self.assertEqual(f.lookup(b'0'), node)
199 self.assertEqual(f.lookup(hex(node)), node)
199 self.assertEqual(f.lookup(hex(node)), node)
200
200
201 self.assertEqual(f.linkrev(0), 0)
201 self.assertEqual(f.linkrev(0), 0)
202
202
203 with self.assertRaises(IndexError):
203 with self.assertRaises(IndexError):
204 f.linkrev(1)
204 f.linkrev(1)
205
205
206 self.assertEqual(f.flags(0), 0)
206 self.assertEqual(f.flags(0), 0)
207
207
208 with self.assertRaises(IndexError):
208 with self.assertRaises(IndexError):
209 f.flags(1)
209 f.flags(1)
210
210
211 self.assertFalse(f.iscensored(0))
211 self.assertFalse(f.iscensored(0))
212
212
213 with self.assertRaises(IndexError):
213 with self.assertRaises(IndexError):
214 f.iscensored(1)
214 f.iscensored(1)
215
215
216 self.assertEqual(list(f.descendants([0])), [])
216 self.assertEqual(list(f.descendants([0])), [])
217
217
218 self.assertEqual(f.headrevs(), [0])
218 self.assertEqual(f.headrevs(), [0])
219
219
220 self.assertEqual(f.heads(), [node])
220 self.assertEqual(f.heads(), [node])
221 self.assertEqual(f.heads(node), [node])
221 self.assertEqual(f.heads(node), [node])
222 self.assertEqual(f.heads(stop=[node]), [node])
222 self.assertEqual(f.heads(stop=[node]), [node])
223
223
224 with self.assertRaises(error.LookupError):
224 with self.assertRaises(error.LookupError):
225 f.heads(stop=[b'\x01' * 20])
225 f.heads(stop=[b'\x01' * 20])
226
226
227 self.assertEqual(f.children(node), [])
227 self.assertEqual(f.children(node), [])
228
228
229 self.assertEqual(f.deltaparent(0), nullrev)
229 self.assertEqual(f.deltaparent(0), nullrev)
230
230
231 def testmultiplerevisions(self):
231 def testmultiplerevisions(self):
232 fulltext0 = b'x' * 1024
232 fulltext0 = b'x' * 1024
233 fulltext1 = fulltext0 + b'y'
233 fulltext1 = fulltext0 + b'y'
234 fulltext2 = b'y' + fulltext0 + b'z'
234 fulltext2 = b'y' + fulltext0 + b'z'
235
235
236 f = self._makefilefn()
236 f = self._makefilefn()
237 with self._maketransactionfn() as tr:
237 with self._maketransactionfn() as tr:
238 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
238 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
239 node1 = f.add(fulltext1, None, tr, 1, node0, nullid)
239 node1 = f.add(fulltext1, None, tr, 1, node0, nullid)
240 node2 = f.add(fulltext2, None, tr, 3, node1, nullid)
240 node2 = f.add(fulltext2, None, tr, 3, node1, nullid)
241
241
242 self.assertEqual(len(f), 3)
242 self.assertEqual(len(f), 3)
243 self.assertEqual(list(f), [0, 1, 2])
243 self.assertEqual(list(f), [0, 1, 2])
244
244
245 gen = iter(f)
245 gen = iter(f)
246 self.assertEqual(next(gen), 0)
246 self.assertEqual(next(gen), 0)
247 self.assertEqual(next(gen), 1)
247 self.assertEqual(next(gen), 1)
248 self.assertEqual(next(gen), 2)
248 self.assertEqual(next(gen), 2)
249
249
250 with self.assertRaises(StopIteration):
250 with self.assertRaises(StopIteration):
251 next(gen)
251 next(gen)
252
252
253 self.assertEqual(list(f.revs()), [0, 1, 2])
253 self.assertEqual(list(f.revs()), [0, 1, 2])
254 self.assertEqual(list(f.revs(0)), [0, 1, 2])
254 self.assertEqual(list(f.revs(0)), [0, 1, 2])
255 self.assertEqual(list(f.revs(1)), [1, 2])
255 self.assertEqual(list(f.revs(1)), [1, 2])
256 self.assertEqual(list(f.revs(2)), [2])
256 self.assertEqual(list(f.revs(2)), [2])
257 self.assertEqual(list(f.revs(3)), [])
257 self.assertEqual(list(f.revs(3)), [])
258 self.assertEqual(list(f.revs(stop=1)), [0, 1])
258 self.assertEqual(list(f.revs(stop=1)), [0, 1])
259 self.assertEqual(list(f.revs(stop=2)), [0, 1, 2])
259 self.assertEqual(list(f.revs(stop=2)), [0, 1, 2])
260 self.assertEqual(list(f.revs(stop=3)), [0, 1, 2])
260 self.assertEqual(list(f.revs(stop=3)), [0, 1, 2])
261 self.assertEqual(list(f.revs(2, 0)), [2, 1, 0])
261 self.assertEqual(list(f.revs(2, 0)), [2, 1, 0])
262 self.assertEqual(list(f.revs(2, 1)), [2, 1])
262 self.assertEqual(list(f.revs(2, 1)), [2, 1])
263 # TODO this is wrong
263 # TODO this is wrong
264 self.assertEqual(list(f.revs(3, 2)), [3, 2])
264 self.assertEqual(list(f.revs(3, 2)), [3, 2])
265
265
266 self.assertEqual(f.parents(node0), (nullid, nullid))
266 self.assertEqual(f.parents(node0), (nullid, nullid))
267 self.assertEqual(f.parents(node1), (node0, nullid))
267 self.assertEqual(f.parents(node1), (node0, nullid))
268 self.assertEqual(f.parents(node2), (node1, nullid))
268 self.assertEqual(f.parents(node2), (node1, nullid))
269
269
270 self.assertEqual(f.parentrevs(0), (nullrev, nullrev))
270 self.assertEqual(f.parentrevs(0), (nullrev, nullrev))
271 self.assertEqual(f.parentrevs(1), (0, nullrev))
271 self.assertEqual(f.parentrevs(1), (0, nullrev))
272 self.assertEqual(f.parentrevs(2), (1, nullrev))
272 self.assertEqual(f.parentrevs(2), (1, nullrev))
273
273
274 self.assertEqual(f.rev(node0), 0)
274 self.assertEqual(f.rev(node0), 0)
275 self.assertEqual(f.rev(node1), 1)
275 self.assertEqual(f.rev(node1), 1)
276 self.assertEqual(f.rev(node2), 2)
276 self.assertEqual(f.rev(node2), 2)
277
277
278 with self.assertRaises(error.LookupError):
278 with self.assertRaises(error.LookupError):
279 f.rev(b'\x01' * 20)
279 f.rev(b'\x01' * 20)
280
280
281 self.assertEqual(f.node(0), node0)
281 self.assertEqual(f.node(0), node0)
282 self.assertEqual(f.node(1), node1)
282 self.assertEqual(f.node(1), node1)
283 self.assertEqual(f.node(2), node2)
283 self.assertEqual(f.node(2), node2)
284
284
285 with self.assertRaises(IndexError):
285 with self.assertRaises(IndexError):
286 f.node(3)
286 f.node(3)
287
287
288 self.assertEqual(f.lookup(node0), node0)
288 self.assertEqual(f.lookup(node0), node0)
289 self.assertEqual(f.lookup(0), node0)
289 self.assertEqual(f.lookup(0), node0)
290 self.assertEqual(f.lookup(b'0'), node0)
290 self.assertEqual(f.lookup(b'0'), node0)
291 self.assertEqual(f.lookup(hex(node0)), node0)
291 self.assertEqual(f.lookup(hex(node0)), node0)
292
292
293 self.assertEqual(f.lookup(node1), node1)
293 self.assertEqual(f.lookup(node1), node1)
294 self.assertEqual(f.lookup(1), node1)
294 self.assertEqual(f.lookup(1), node1)
295 self.assertEqual(f.lookup(b'1'), node1)
295 self.assertEqual(f.lookup(b'1'), node1)
296 self.assertEqual(f.lookup(hex(node1)), node1)
296 self.assertEqual(f.lookup(hex(node1)), node1)
297
297
298 self.assertEqual(f.linkrev(0), 0)
298 self.assertEqual(f.linkrev(0), 0)
299 self.assertEqual(f.linkrev(1), 1)
299 self.assertEqual(f.linkrev(1), 1)
300 self.assertEqual(f.linkrev(2), 3)
300 self.assertEqual(f.linkrev(2), 3)
301
301
302 with self.assertRaises(IndexError):
302 with self.assertRaises(IndexError):
303 f.linkrev(3)
303 f.linkrev(3)
304
304
305 self.assertEqual(f.flags(0), 0)
305 self.assertEqual(f.flags(0), 0)
306 self.assertEqual(f.flags(1), 0)
306 self.assertEqual(f.flags(1), 0)
307 self.assertEqual(f.flags(2), 0)
307 self.assertEqual(f.flags(2), 0)
308
308
309 with self.assertRaises(IndexError):
309 with self.assertRaises(IndexError):
310 f.flags(3)
310 f.flags(3)
311
311
312 self.assertFalse(f.iscensored(0))
312 self.assertFalse(f.iscensored(0))
313 self.assertFalse(f.iscensored(1))
313 self.assertFalse(f.iscensored(1))
314 self.assertFalse(f.iscensored(2))
314 self.assertFalse(f.iscensored(2))
315
315
316 with self.assertRaises(IndexError):
316 with self.assertRaises(IndexError):
317 f.iscensored(3)
317 f.iscensored(3)
318
318
319 self.assertEqual(f.commonancestorsheads(node1, nullid), [])
319 self.assertEqual(f.commonancestorsheads(node1, nullid), [])
320 self.assertEqual(f.commonancestorsheads(node1, node0), [node0])
320 self.assertEqual(f.commonancestorsheads(node1, node0), [node0])
321 self.assertEqual(f.commonancestorsheads(node1, node1), [node1])
321 self.assertEqual(f.commonancestorsheads(node1, node1), [node1])
322 self.assertEqual(f.commonancestorsheads(node0, node1), [node0])
322 self.assertEqual(f.commonancestorsheads(node0, node1), [node0])
323 self.assertEqual(f.commonancestorsheads(node1, node2), [node1])
323 self.assertEqual(f.commonancestorsheads(node1, node2), [node1])
324 self.assertEqual(f.commonancestorsheads(node2, node1), [node1])
324 self.assertEqual(f.commonancestorsheads(node2, node1), [node1])
325
325
326 self.assertEqual(list(f.descendants([0])), [1, 2])
326 self.assertEqual(list(f.descendants([0])), [1, 2])
327 self.assertEqual(list(f.descendants([1])), [2])
327 self.assertEqual(list(f.descendants([1])), [2])
328 self.assertEqual(list(f.descendants([0, 1])), [1, 2])
328 self.assertEqual(list(f.descendants([0, 1])), [1, 2])
329
329
330 self.assertEqual(f.headrevs(), [2])
330 self.assertEqual(f.headrevs(), [2])
331
331
332 self.assertEqual(f.heads(), [node2])
332 self.assertEqual(f.heads(), [node2])
333 self.assertEqual(f.heads(node0), [node2])
333 self.assertEqual(f.heads(node0), [node2])
334 self.assertEqual(f.heads(node1), [node2])
334 self.assertEqual(f.heads(node1), [node2])
335 self.assertEqual(f.heads(node2), [node2])
335 self.assertEqual(f.heads(node2), [node2])
336
336
337 # TODO this behavior seems wonky. Is it correct? If so, the
337 # TODO this behavior seems wonky. Is it correct? If so, the
338 # docstring for heads() should be updated to reflect desired
338 # docstring for heads() should be updated to reflect desired
339 # behavior.
339 # behavior.
340 self.assertEqual(f.heads(stop=[node1]), [node1, node2])
340 self.assertEqual(f.heads(stop=[node1]), [node1, node2])
341 self.assertEqual(f.heads(stop=[node0]), [node0, node2])
341 self.assertEqual(f.heads(stop=[node0]), [node0, node2])
342 self.assertEqual(f.heads(stop=[node1, node2]), [node1, node2])
342 self.assertEqual(f.heads(stop=[node1, node2]), [node1, node2])
343
343
344 with self.assertRaises(error.LookupError):
344 with self.assertRaises(error.LookupError):
345 f.heads(stop=[b'\x01' * 20])
345 f.heads(stop=[b'\x01' * 20])
346
346
347 self.assertEqual(f.children(node0), [node1])
347 self.assertEqual(f.children(node0), [node1])
348 self.assertEqual(f.children(node1), [node2])
348 self.assertEqual(f.children(node1), [node2])
349 self.assertEqual(f.children(node2), [])
349 self.assertEqual(f.children(node2), [])
350
350
351 self.assertEqual(f.deltaparent(0), nullrev)
351 self.assertEqual(f.deltaparent(0), nullrev)
352 self.assertEqual(f.deltaparent(1), 0)
352 self.assertEqual(f.deltaparent(1), 0)
353 self.assertEqual(f.deltaparent(2), 1)
353 self.assertEqual(f.deltaparent(2), 1)
354
354
355 def testmultipleheads(self):
355 def testmultipleheads(self):
356 f = self._makefilefn()
356 f = self._makefilefn()
357
357
358 with self._maketransactionfn() as tr:
358 with self._maketransactionfn() as tr:
359 node0 = f.add(b'0', None, tr, 0, nullid, nullid)
359 node0 = f.add(b'0', None, tr, 0, nullid, nullid)
360 node1 = f.add(b'1', None, tr, 1, node0, nullid)
360 node1 = f.add(b'1', None, tr, 1, node0, nullid)
361 node2 = f.add(b'2', None, tr, 2, node1, nullid)
361 node2 = f.add(b'2', None, tr, 2, node1, nullid)
362 node3 = f.add(b'3', None, tr, 3, node0, nullid)
362 node3 = f.add(b'3', None, tr, 3, node0, nullid)
363 node4 = f.add(b'4', None, tr, 4, node3, nullid)
363 node4 = f.add(b'4', None, tr, 4, node3, nullid)
364 node5 = f.add(b'5', None, tr, 5, node0, nullid)
364 node5 = f.add(b'5', None, tr, 5, node0, nullid)
365
365
366 self.assertEqual(len(f), 6)
366 self.assertEqual(len(f), 6)
367
367
368 self.assertEqual(list(f.descendants([0])), [1, 2, 3, 4, 5])
368 self.assertEqual(list(f.descendants([0])), [1, 2, 3, 4, 5])
369 self.assertEqual(list(f.descendants([1])), [2])
369 self.assertEqual(list(f.descendants([1])), [2])
370 self.assertEqual(list(f.descendants([2])), [])
370 self.assertEqual(list(f.descendants([2])), [])
371 self.assertEqual(list(f.descendants([3])), [4])
371 self.assertEqual(list(f.descendants([3])), [4])
372 self.assertEqual(list(f.descendants([0, 1])), [1, 2, 3, 4, 5])
372 self.assertEqual(list(f.descendants([0, 1])), [1, 2, 3, 4, 5])
373 self.assertEqual(list(f.descendants([1, 3])), [2, 4])
373 self.assertEqual(list(f.descendants([1, 3])), [2, 4])
374
374
375 self.assertEqual(f.headrevs(), [2, 4, 5])
375 self.assertEqual(f.headrevs(), [2, 4, 5])
376
376
377 self.assertEqual(f.heads(), [node2, node4, node5])
377 self.assertEqual(f.heads(), [node2, node4, node5])
378 self.assertEqual(f.heads(node0), [node2, node4, node5])
378 self.assertEqual(f.heads(node0), [node2, node4, node5])
379 self.assertEqual(f.heads(node1), [node2])
379 self.assertEqual(f.heads(node1), [node2])
380 self.assertEqual(f.heads(node2), [node2])
380 self.assertEqual(f.heads(node2), [node2])
381 self.assertEqual(f.heads(node3), [node4])
381 self.assertEqual(f.heads(node3), [node4])
382 self.assertEqual(f.heads(node4), [node4])
382 self.assertEqual(f.heads(node4), [node4])
383 self.assertEqual(f.heads(node5), [node5])
383 self.assertEqual(f.heads(node5), [node5])
384
384
385 # TODO this seems wrong.
385 # TODO this seems wrong.
386 self.assertEqual(f.heads(stop=[node0]), [node0, node2, node4, node5])
386 self.assertEqual(f.heads(stop=[node0]), [node0, node2, node4, node5])
387 self.assertEqual(f.heads(stop=[node1]), [node1, node2, node4, node5])
387 self.assertEqual(f.heads(stop=[node1]), [node1, node2, node4, node5])
388
388
389 self.assertEqual(f.children(node0), [node1, node3, node5])
389 self.assertEqual(f.children(node0), [node1, node3, node5])
390 self.assertEqual(f.children(node1), [node2])
390 self.assertEqual(f.children(node1), [node2])
391 self.assertEqual(f.children(node2), [])
391 self.assertEqual(f.children(node2), [])
392 self.assertEqual(f.children(node3), [node4])
392 self.assertEqual(f.children(node3), [node4])
393 self.assertEqual(f.children(node4), [])
393 self.assertEqual(f.children(node4), [])
394 self.assertEqual(f.children(node5), [])
394 self.assertEqual(f.children(node5), [])
395
395
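
The test classes in this module are abstract: they reference ``self._makefilefn`` and ``self._maketransactionfn``, which the maker functions named in the docstrings (``makeifileindextests()`` and its siblings) are responsible for binding onto a generated subclass. A hedged sketch of how a storage backend's test module might wire that up — the factory signature and the backend constructors shown here are assumptions, not part of this file:

    # test_mybackend.py - hypothetical consumer of these generic storage tests.
    import unittest

    # Assumes this module lives at mercurial.testing.storage, consistent with
    # the relative imports at the top of the file.
    from mercurial.testing import storage

    def makefilefn():
        # Return a fresh, empty file storage object for the backend under test.
        return mybackendfilestore()          # hypothetical constructor

    def maketransactionfn():
        # Return a context manager yielding a transaction usable with add().
        return mybackendtransaction()        # hypothetical constructor

    # Assumed factory signature: returns a unittest.TestCase subclass with
    # _makefilefn/_maketransactionfn bound as static methods.
    ifileindextests = storage.makeifileindextests(makefilefn, maketransactionfn)

    if __name__ == '__main__':
        unittest.main()
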
396 class ifiledatatests(basetestcase):
396 class ifiledatatests(basetestcase):
397 """Generic tests for the ifiledata interface.
397 """Generic tests for the ifiledata interface.
398
398
399 All file storage backends for data should conform to the tests in this
399 All file storage backends for data should conform to the tests in this
400 class.
400 class.
401
401
402 Use ``makeifiledatatests()`` to create an instance of this type.
402 Use ``makeifiledatatests()`` to create an instance of this type.
403 """
403 """
404 def testempty(self):
404 def testempty(self):
405 f = self._makefilefn()
405 f = self._makefilefn()
406
406
407 self.assertEqual(f.rawsize(nullrev), 0)
407 self.assertEqual(f.rawsize(nullrev), 0)
408
408
409 for i in range(-5, 5):
409 for i in range(-5, 5):
410 if i == nullrev:
410 if i == nullrev:
411 continue
411 continue
412
412
413 with self.assertRaises(IndexError):
413 with self.assertRaises(IndexError):
414 f.rawsize(i)
414 f.rawsize(i)
415
415
416 self.assertEqual(f.size(nullrev), 0)
416 self.assertEqual(f.size(nullrev), 0)
417
417
418 for i in range(-5, 5):
418 for i in range(-5, 5):
419 if i == nullrev:
419 if i == nullrev:
420 continue
420 continue
421
421
422 with self.assertRaises(IndexError):
422 with self.assertRaises(IndexError):
423 f.size(i)
423 f.size(i)
424
424
425 with self.assertRaises(error.RevlogError):
425 with self.assertRaises(error.StorageError):
426 f.checkhash(b'', nullid)
426 f.checkhash(b'', nullid)
427
427
428 with self.assertRaises(error.LookupError):
428 with self.assertRaises(error.LookupError):
429 f.checkhash(b'', b'\x01' * 20)
429 f.checkhash(b'', b'\x01' * 20)
430
430
431 self.assertEqual(f.revision(nullid), b'')
431 self.assertEqual(f.revision(nullid), b'')
432 self.assertEqual(f.revision(nullid, raw=True), b'')
432 self.assertEqual(f.revision(nullid, raw=True), b'')
433
433
434 with self.assertRaises(error.LookupError):
434 with self.assertRaises(error.LookupError):
435 f.revision(b'\x01' * 20)
435 f.revision(b'\x01' * 20)
436
436
437 self.assertEqual(f.read(nullid), b'')
437 self.assertEqual(f.read(nullid), b'')
438
438
439 with self.assertRaises(error.LookupError):
439 with self.assertRaises(error.LookupError):
440 f.read(b'\x01' * 20)
440 f.read(b'\x01' * 20)
441
441
442 self.assertFalse(f.renamed(nullid))
442 self.assertFalse(f.renamed(nullid))
443
443
444 with self.assertRaises(error.LookupError):
444 with self.assertRaises(error.LookupError):
445 f.read(b'\x01' * 20)
445 f.read(b'\x01' * 20)
446
446
447 self.assertTrue(f.cmp(nullid, b''))
447 self.assertTrue(f.cmp(nullid, b''))
448 self.assertTrue(f.cmp(nullid, b'foo'))
448 self.assertTrue(f.cmp(nullid, b'foo'))
449
449
450 with self.assertRaises(error.LookupError):
450 with self.assertRaises(error.LookupError):
451 f.cmp(b'\x01' * 20, b'irrelevant')
451 f.cmp(b'\x01' * 20, b'irrelevant')
452
452
453 self.assertEqual(f.revdiff(nullrev, nullrev), b'')
453 self.assertEqual(f.revdiff(nullrev, nullrev), b'')
454
454
455 with self.assertRaises(IndexError):
455 with self.assertRaises(IndexError):
456 f.revdiff(0, nullrev)
456 f.revdiff(0, nullrev)
457
457
458 with self.assertRaises(IndexError):
458 with self.assertRaises(IndexError):
459 f.revdiff(nullrev, 0)
459 f.revdiff(nullrev, 0)
460
460
461 with self.assertRaises(IndexError):
461 with self.assertRaises(IndexError):
462 f.revdiff(0, 0)
462 f.revdiff(0, 0)
463
463
464 gen = f.emitrevisiondeltas([])
464 gen = f.emitrevisiondeltas([])
465 with self.assertRaises(StopIteration):
465 with self.assertRaises(StopIteration):
466 next(gen)
466 next(gen)
467
467
468 requests = [
468 requests = [
469 revisiondeltarequest(nullid, nullid, nullid, nullid, nullid, False),
469 revisiondeltarequest(nullid, nullid, nullid, nullid, nullid, False),
470 ]
470 ]
471 gen = f.emitrevisiondeltas(requests)
471 gen = f.emitrevisiondeltas(requests)
472
472
473 delta = next(gen)
473 delta = next(gen)
474
474
475 self.assertEqual(delta.node, nullid)
475 self.assertEqual(delta.node, nullid)
476 self.assertEqual(delta.p1node, nullid)
476 self.assertEqual(delta.p1node, nullid)
477 self.assertEqual(delta.p2node, nullid)
477 self.assertEqual(delta.p2node, nullid)
478 self.assertEqual(delta.linknode, nullid)
478 self.assertEqual(delta.linknode, nullid)
479 self.assertEqual(delta.basenode, nullid)
479 self.assertEqual(delta.basenode, nullid)
480 self.assertIsNone(delta.baserevisionsize)
480 self.assertIsNone(delta.baserevisionsize)
481 self.assertEqual(delta.revision, b'')
481 self.assertEqual(delta.revision, b'')
482 self.assertIsNone(delta.delta)
482 self.assertIsNone(delta.delta)
483
483
484 with self.assertRaises(StopIteration):
484 with self.assertRaises(StopIteration):
485 next(gen)
485 next(gen)
486
486
487 requests = [
487 requests = [
488 revisiondeltarequest(nullid, nullid, nullid, nullid, nullid, False),
488 revisiondeltarequest(nullid, nullid, nullid, nullid, nullid, False),
489 revisiondeltarequest(nullid, b'\x01' * 20, b'\x02' * 20,
489 revisiondeltarequest(nullid, b'\x01' * 20, b'\x02' * 20,
490 b'\x03' * 20, nullid, False)
490 b'\x03' * 20, nullid, False)
491 ]
491 ]
492
492
493 gen = f.emitrevisiondeltas(requests)
493 gen = f.emitrevisiondeltas(requests)
494
494
495 next(gen)
495 next(gen)
496 delta = next(gen)
496 delta = next(gen)
497
497
498 self.assertEqual(delta.node, nullid)
498 self.assertEqual(delta.node, nullid)
499 self.assertEqual(delta.p1node, b'\x01' * 20)
499 self.assertEqual(delta.p1node, b'\x01' * 20)
500 self.assertEqual(delta.p2node, b'\x02' * 20)
500 self.assertEqual(delta.p2node, b'\x02' * 20)
501 self.assertEqual(delta.linknode, b'\x03' * 20)
501 self.assertEqual(delta.linknode, b'\x03' * 20)
502 self.assertEqual(delta.basenode, nullid)
502 self.assertEqual(delta.basenode, nullid)
503 self.assertIsNone(delta.baserevisionsize)
503 self.assertIsNone(delta.baserevisionsize)
504 self.assertEqual(delta.revision, b'')
504 self.assertEqual(delta.revision, b'')
505 self.assertIsNone(delta.delta)
505 self.assertIsNone(delta.delta)
506
506
507 with self.assertRaises(StopIteration):
507 with self.assertRaises(StopIteration):
508 next(gen)
508 next(gen)
509
509
510 def testsinglerevision(self):
510 def testsinglerevision(self):
511 fulltext = b'initial'
511 fulltext = b'initial'
512
512
513 f = self._makefilefn()
513 f = self._makefilefn()
514 with self._maketransactionfn() as tr:
514 with self._maketransactionfn() as tr:
515 node = f.add(fulltext, None, tr, 0, nullid, nullid)
515 node = f.add(fulltext, None, tr, 0, nullid, nullid)
516
516
517 self.assertEqual(f.rawsize(0), len(fulltext))
517 self.assertEqual(f.rawsize(0), len(fulltext))
518
518
519 with self.assertRaises(IndexError):
519 with self.assertRaises(IndexError):
520 f.rawsize(1)
520 f.rawsize(1)
521
521
522 self.assertEqual(f.size(0), len(fulltext))
522 self.assertEqual(f.size(0), len(fulltext))
523
523
524 with self.assertRaises(IndexError):
524 with self.assertRaises(IndexError):
525 f.size(1)
525 f.size(1)
526
526
527 f.checkhash(fulltext, node)
527 f.checkhash(fulltext, node)
528 f.checkhash(fulltext, node, nullid, nullid)
528 f.checkhash(fulltext, node, nullid, nullid)
529
529
530 with self.assertRaises(error.RevlogError):
530 with self.assertRaises(error.StorageError):
531 f.checkhash(fulltext + b'extra', node)
531 f.checkhash(fulltext + b'extra', node)
532
532
533 with self.assertRaises(error.RevlogError):
533 with self.assertRaises(error.StorageError):
534 f.checkhash(fulltext, node, b'\x01' * 20, nullid)
534 f.checkhash(fulltext, node, b'\x01' * 20, nullid)
535
535
536 with self.assertRaises(error.RevlogError):
536 with self.assertRaises(error.StorageError):
537 f.checkhash(fulltext, node, nullid, b'\x01' * 20)
537 f.checkhash(fulltext, node, nullid, b'\x01' * 20)
538
538
539 self.assertEqual(f.revision(node), fulltext)
539 self.assertEqual(f.revision(node), fulltext)
540 self.assertEqual(f.revision(node, raw=True), fulltext)
540 self.assertEqual(f.revision(node, raw=True), fulltext)
541
541
542 self.assertEqual(f.read(node), fulltext)
542 self.assertEqual(f.read(node), fulltext)
543
543
544 self.assertFalse(f.renamed(node))
544 self.assertFalse(f.renamed(node))
545
545
546 self.assertFalse(f.cmp(node, fulltext))
546 self.assertFalse(f.cmp(node, fulltext))
547 self.assertTrue(f.cmp(node, fulltext + b'extra'))
547 self.assertTrue(f.cmp(node, fulltext + b'extra'))
548
548
549 self.assertEqual(f.revdiff(0, 0), b'')
549 self.assertEqual(f.revdiff(0, 0), b'')
550 self.assertEqual(f.revdiff(nullrev, 0),
550 self.assertEqual(f.revdiff(nullrev, 0),
551 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07%s' %
551 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07%s' %
552 fulltext)
552 fulltext)
553
553
554 self.assertEqual(f.revdiff(0, nullrev),
554 self.assertEqual(f.revdiff(0, nullrev),
555 b'\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00')
555 b'\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00')
556
556
557 requests = [
557 requests = [
558 revisiondeltarequest(node, nullid, nullid, nullid, nullid, False),
558 revisiondeltarequest(node, nullid, nullid, nullid, nullid, False),
559 ]
559 ]
560 gen = f.emitrevisiondeltas(requests)
560 gen = f.emitrevisiondeltas(requests)
561
561
562 delta = next(gen)
562 delta = next(gen)
563
563
564 self.assertEqual(delta.node, node)
564 self.assertEqual(delta.node, node)
565 self.assertEqual(delta.p1node, nullid)
565 self.assertEqual(delta.p1node, nullid)
566 self.assertEqual(delta.p2node, nullid)
566 self.assertEqual(delta.p2node, nullid)
567 self.assertEqual(delta.linknode, nullid)
567 self.assertEqual(delta.linknode, nullid)
568 self.assertEqual(delta.basenode, nullid)
568 self.assertEqual(delta.basenode, nullid)
569 self.assertIsNone(delta.baserevisionsize)
569 self.assertIsNone(delta.baserevisionsize)
570 self.assertEqual(delta.revision, fulltext)
570 self.assertEqual(delta.revision, fulltext)
571 self.assertIsNone(delta.delta)
571 self.assertIsNone(delta.delta)
572
572
573 with self.assertRaises(StopIteration):
573 with self.assertRaises(StopIteration):
574 next(gen)
574 next(gen)
575
575
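
The opaque byte strings asserted against ``revdiff()`` above are bdiff-style delta headers: three big-endian 32-bit integers giving the start offset, end offset and replacement length, followed by the replacement bytes (``mdiff.trivialdiffheader()``, used further down in this file, produces such a header for a full-text replacement). Decoding the two expected values by hand confirms the arithmetic — a quick illustrative check, not part of the test API:

    import struct

    # f.revdiff(nullrev, 0): replace bytes [0, 0) with the 7-byte b'initial'.
    assert struct.unpack('>lll', b'\x00' * 8 + b'\x00\x00\x00\x07') == (0, 0, 7)

    # f.revdiff(0, nullrev): replace bytes [0, 7) with nothing, i.e. delete all.
    assert struct.unpack('>lll',
                         b'\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00') == (0, 7, 0)
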
576 def testmultiplerevisions(self):
576 def testmultiplerevisions(self):
577 fulltext0 = b'x' * 1024
577 fulltext0 = b'x' * 1024
578 fulltext1 = fulltext0 + b'y'
578 fulltext1 = fulltext0 + b'y'
579 fulltext2 = b'y' + fulltext0 + b'z'
579 fulltext2 = b'y' + fulltext0 + b'z'
580
580
581 f = self._makefilefn()
581 f = self._makefilefn()
582 with self._maketransactionfn() as tr:
582 with self._maketransactionfn() as tr:
583 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
583 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
584 node1 = f.add(fulltext1, None, tr, 1, node0, nullid)
584 node1 = f.add(fulltext1, None, tr, 1, node0, nullid)
585 node2 = f.add(fulltext2, None, tr, 3, node1, nullid)
585 node2 = f.add(fulltext2, None, tr, 3, node1, nullid)
586
586
587 self.assertEqual(f.rawsize(0), len(fulltext0))
587 self.assertEqual(f.rawsize(0), len(fulltext0))
588 self.assertEqual(f.rawsize(1), len(fulltext1))
588 self.assertEqual(f.rawsize(1), len(fulltext1))
589 self.assertEqual(f.rawsize(2), len(fulltext2))
589 self.assertEqual(f.rawsize(2), len(fulltext2))
590
590
591 with self.assertRaises(IndexError):
591 with self.assertRaises(IndexError):
592 f.rawsize(3)
592 f.rawsize(3)
593
593
594 self.assertEqual(f.size(0), len(fulltext0))
594 self.assertEqual(f.size(0), len(fulltext0))
595 self.assertEqual(f.size(1), len(fulltext1))
595 self.assertEqual(f.size(1), len(fulltext1))
596 self.assertEqual(f.size(2), len(fulltext2))
596 self.assertEqual(f.size(2), len(fulltext2))
597
597
598 with self.assertRaises(IndexError):
598 with self.assertRaises(IndexError):
599 f.size(3)
599 f.size(3)
600
600
601 f.checkhash(fulltext0, node0)
601 f.checkhash(fulltext0, node0)
602 f.checkhash(fulltext1, node1)
602 f.checkhash(fulltext1, node1)
603 f.checkhash(fulltext1, node1, node0, nullid)
603 f.checkhash(fulltext1, node1, node0, nullid)
604 f.checkhash(fulltext2, node2, node1, nullid)
604 f.checkhash(fulltext2, node2, node1, nullid)
605
605
606 with self.assertRaises(error.RevlogError):
606 with self.assertRaises(error.StorageError):
607 f.checkhash(fulltext1, b'\x01' * 20)
607 f.checkhash(fulltext1, b'\x01' * 20)
608
608
609 with self.assertRaises(error.RevlogError):
609 with self.assertRaises(error.StorageError):
610 f.checkhash(fulltext1 + b'extra', node1, node0, nullid)
610 f.checkhash(fulltext1 + b'extra', node1, node0, nullid)
611
611
612 with self.assertRaises(error.RevlogError):
612 with self.assertRaises(error.StorageError):
613 f.checkhash(fulltext1, node1, node0, node0)
613 f.checkhash(fulltext1, node1, node0, node0)
614
614
615 self.assertEqual(f.revision(node0), fulltext0)
615 self.assertEqual(f.revision(node0), fulltext0)
616 self.assertEqual(f.revision(node0, raw=True), fulltext0)
616 self.assertEqual(f.revision(node0, raw=True), fulltext0)
617 self.assertEqual(f.revision(node1), fulltext1)
617 self.assertEqual(f.revision(node1), fulltext1)
618 self.assertEqual(f.revision(node1, raw=True), fulltext1)
618 self.assertEqual(f.revision(node1, raw=True), fulltext1)
619 self.assertEqual(f.revision(node2), fulltext2)
619 self.assertEqual(f.revision(node2), fulltext2)
620 self.assertEqual(f.revision(node2, raw=True), fulltext2)
620 self.assertEqual(f.revision(node2, raw=True), fulltext2)
621
621
622 with self.assertRaises(error.LookupError):
622 with self.assertRaises(error.LookupError):
623 f.revision(b'\x01' * 20)
623 f.revision(b'\x01' * 20)
624
624
625 self.assertEqual(f.read(node0), fulltext0)
625 self.assertEqual(f.read(node0), fulltext0)
626 self.assertEqual(f.read(node1), fulltext1)
626 self.assertEqual(f.read(node1), fulltext1)
627 self.assertEqual(f.read(node2), fulltext2)
627 self.assertEqual(f.read(node2), fulltext2)
628
628
629 with self.assertRaises(error.LookupError):
629 with self.assertRaises(error.LookupError):
630 f.read(b'\x01' * 20)
630 f.read(b'\x01' * 20)
631
631
632 self.assertFalse(f.renamed(node0))
632 self.assertFalse(f.renamed(node0))
633 self.assertFalse(f.renamed(node1))
633 self.assertFalse(f.renamed(node1))
634 self.assertFalse(f.renamed(node2))
634 self.assertFalse(f.renamed(node2))
635
635
636 with self.assertRaises(error.LookupError):
636 with self.assertRaises(error.LookupError):
637 f.renamed(b'\x01' * 20)
637 f.renamed(b'\x01' * 20)
638
638
639 self.assertFalse(f.cmp(node0, fulltext0))
639 self.assertFalse(f.cmp(node0, fulltext0))
640 self.assertFalse(f.cmp(node1, fulltext1))
640 self.assertFalse(f.cmp(node1, fulltext1))
641 self.assertFalse(f.cmp(node2, fulltext2))
641 self.assertFalse(f.cmp(node2, fulltext2))
642
642
643 self.assertTrue(f.cmp(node1, fulltext0))
643 self.assertTrue(f.cmp(node1, fulltext0))
644 self.assertTrue(f.cmp(node2, fulltext1))
644 self.assertTrue(f.cmp(node2, fulltext1))
645
645
646 with self.assertRaises(error.LookupError):
646 with self.assertRaises(error.LookupError):
647 f.cmp(b'\x01' * 20, b'irrelevant')
647 f.cmp(b'\x01' * 20, b'irrelevant')
648
648
649 self.assertEqual(f.revdiff(0, 1),
649 self.assertEqual(f.revdiff(0, 1),
650 b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' +
650 b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' +
651 fulltext1)
651 fulltext1)
652
652
653 self.assertEqual(f.revdiff(0, 2),
653 self.assertEqual(f.revdiff(0, 2),
654 b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x02' +
654 b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x02' +
655 fulltext2)
655 fulltext2)
656
656
657 requests = [
657 requests = [
658 revisiondeltarequest(node0, nullid, nullid, b'\x01' * 20, nullid,
658 revisiondeltarequest(node0, nullid, nullid, b'\x01' * 20, nullid,
659 False),
659 False),
660 revisiondeltarequest(node1, node0, nullid, b'\x02' * 20, node0,
660 revisiondeltarequest(node1, node0, nullid, b'\x02' * 20, node0,
661 False),
661 False),
662 revisiondeltarequest(node2, node1, nullid, b'\x03' * 20, node1,
662 revisiondeltarequest(node2, node1, nullid, b'\x03' * 20, node1,
663 False),
663 False),
664 ]
664 ]
665 gen = f.emitrevisiondeltas(requests)
665 gen = f.emitrevisiondeltas(requests)
666
666
667 delta = next(gen)
667 delta = next(gen)
668
668
669 self.assertEqual(delta.node, node0)
669 self.assertEqual(delta.node, node0)
670 self.assertEqual(delta.p1node, nullid)
670 self.assertEqual(delta.p1node, nullid)
671 self.assertEqual(delta.p2node, nullid)
671 self.assertEqual(delta.p2node, nullid)
672 self.assertEqual(delta.linknode, b'\x01' * 20)
672 self.assertEqual(delta.linknode, b'\x01' * 20)
673 self.assertEqual(delta.basenode, nullid)
673 self.assertEqual(delta.basenode, nullid)
674 self.assertIsNone(delta.baserevisionsize)
674 self.assertIsNone(delta.baserevisionsize)
675 self.assertEqual(delta.revision, fulltext0)
675 self.assertEqual(delta.revision, fulltext0)
676 self.assertIsNone(delta.delta)
676 self.assertIsNone(delta.delta)
677
677
678 delta = next(gen)
678 delta = next(gen)
679
679
680 self.assertEqual(delta.node, node1)
680 self.assertEqual(delta.node, node1)
681 self.assertEqual(delta.p1node, node0)
681 self.assertEqual(delta.p1node, node0)
682 self.assertEqual(delta.p2node, nullid)
682 self.assertEqual(delta.p2node, nullid)
683 self.assertEqual(delta.linknode, b'\x02' * 20)
683 self.assertEqual(delta.linknode, b'\x02' * 20)
684 self.assertEqual(delta.basenode, node0)
684 self.assertEqual(delta.basenode, node0)
685 self.assertIsNone(delta.baserevisionsize)
685 self.assertIsNone(delta.baserevisionsize)
686 self.assertIsNone(delta.revision)
686 self.assertIsNone(delta.revision)
687 self.assertEqual(delta.delta,
687 self.assertEqual(delta.delta,
688 b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' +
688 b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' +
689 fulltext1)
689 fulltext1)
690
690
691 delta = next(gen)
691 delta = next(gen)
692
692
693 self.assertEqual(delta.node, node2)
693 self.assertEqual(delta.node, node2)
694 self.assertEqual(delta.p1node, node1)
694 self.assertEqual(delta.p1node, node1)
695 self.assertEqual(delta.p2node, nullid)
695 self.assertEqual(delta.p2node, nullid)
696 self.assertEqual(delta.linknode, b'\x03' * 20)
696 self.assertEqual(delta.linknode, b'\x03' * 20)
697 self.assertEqual(delta.basenode, node1)
697 self.assertEqual(delta.basenode, node1)
698 self.assertIsNone(delta.baserevisionsize)
698 self.assertIsNone(delta.baserevisionsize)
699 self.assertIsNone(delta.revision)
699 self.assertIsNone(delta.revision)
700 self.assertEqual(delta.delta,
700 self.assertEqual(delta.delta,
701 b'\x00\x00\x00\x00\x00\x00\x04\x01\x00\x00\x04\x02' +
701 b'\x00\x00\x00\x00\x00\x00\x04\x01\x00\x00\x04\x02' +
702 fulltext2)
702 fulltext2)
703
703
704 with self.assertRaises(StopIteration):
704 with self.assertRaises(StopIteration):
705 next(gen)
705 next(gen)
706
706
707 def testrenamed(self):
707 def testrenamed(self):
708 fulltext0 = b'foo'
708 fulltext0 = b'foo'
709 fulltext1 = b'bar'
709 fulltext1 = b'bar'
710 fulltext2 = b'baz'
710 fulltext2 = b'baz'
711
711
712 meta1 = {
712 meta1 = {
713 b'copy': b'source0',
713 b'copy': b'source0',
714 b'copyrev': b'a' * 40,
714 b'copyrev': b'a' * 40,
715 }
715 }
716
716
717 meta2 = {
717 meta2 = {
718 b'copy': b'source1',
718 b'copy': b'source1',
719 b'copyrev': b'b' * 40,
719 b'copyrev': b'b' * 40,
720 }
720 }
721
721
722 stored1 = b''.join([
722 stored1 = b''.join([
723 b'\x01\ncopy: source0\n',
723 b'\x01\ncopy: source0\n',
724 b'copyrev: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n\x01\n',
724 b'copyrev: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n\x01\n',
725 fulltext1,
725 fulltext1,
726 ])
726 ])
727
727
728 stored2 = b''.join([
728 stored2 = b''.join([
729 b'\x01\ncopy: source1\n',
729 b'\x01\ncopy: source1\n',
730 b'copyrev: bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n\x01\n',
730 b'copyrev: bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n\x01\n',
731 fulltext2,
731 fulltext2,
732 ])
732 ])
733
733
734 f = self._makefilefn()
734 f = self._makefilefn()
735 with self._maketransactionfn() as tr:
735 with self._maketransactionfn() as tr:
736 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
736 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
737 node1 = f.add(fulltext1, meta1, tr, 1, node0, nullid)
737 node1 = f.add(fulltext1, meta1, tr, 1, node0, nullid)
738 node2 = f.add(fulltext2, meta2, tr, 2, nullid, nullid)
738 node2 = f.add(fulltext2, meta2, tr, 2, nullid, nullid)
739
739
740 self.assertEqual(f.rawsize(1), len(stored1))
740 self.assertEqual(f.rawsize(1), len(stored1))
741 self.assertEqual(f.rawsize(2), len(stored2))
741 self.assertEqual(f.rawsize(2), len(stored2))
742
742
743 # Metadata header isn't recognized when parent isn't nullid.
743 # Metadata header isn't recognized when parent isn't nullid.
744 self.assertEqual(f.size(1), len(stored1))
744 self.assertEqual(f.size(1), len(stored1))
745 self.assertEqual(f.size(2), len(fulltext2))
745 self.assertEqual(f.size(2), len(fulltext2))
746
746
747 self.assertEqual(f.revision(node1), stored1)
747 self.assertEqual(f.revision(node1), stored1)
748 self.assertEqual(f.revision(node1, raw=True), stored1)
748 self.assertEqual(f.revision(node1, raw=True), stored1)
749 self.assertEqual(f.revision(node2), stored2)
749 self.assertEqual(f.revision(node2), stored2)
750 self.assertEqual(f.revision(node2, raw=True), stored2)
750 self.assertEqual(f.revision(node2, raw=True), stored2)
751
751
752 self.assertEqual(f.read(node1), fulltext1)
752 self.assertEqual(f.read(node1), fulltext1)
753 self.assertEqual(f.read(node2), fulltext2)
753 self.assertEqual(f.read(node2), fulltext2)
754
754
755 # Returns False when first parent is set.
755 # Returns False when first parent is set.
756 self.assertFalse(f.renamed(node1))
756 self.assertFalse(f.renamed(node1))
757 self.assertEqual(f.renamed(node2), (b'source1', b'\xbb' * 20))
757 self.assertEqual(f.renamed(node2), (b'source1', b'\xbb' * 20))
758
758
759 self.assertTrue(f.cmp(node1, fulltext1))
759 self.assertTrue(f.cmp(node1, fulltext1))
760 self.assertTrue(f.cmp(node1, stored1))
760 self.assertTrue(f.cmp(node1, stored1))
761 self.assertFalse(f.cmp(node2, fulltext2))
761 self.assertFalse(f.cmp(node2, fulltext2))
762 self.assertTrue(f.cmp(node2, stored2))
762 self.assertTrue(f.cmp(node2, stored2))
763
763
764 def testmetadataprefix(self):
764 def testmetadataprefix(self):
765 # Content with metadata prefix has extra prefix inserted in storage.
765 # Content with metadata prefix has extra prefix inserted in storage.
766 fulltext0 = b'\x01\nfoo'
766 fulltext0 = b'\x01\nfoo'
767 stored0 = b'\x01\n\x01\n\x01\nfoo'
767 stored0 = b'\x01\n\x01\n\x01\nfoo'
768
768
769 fulltext1 = b'\x01\nbar'
769 fulltext1 = b'\x01\nbar'
770 meta1 = {
770 meta1 = {
771 b'copy': b'source0',
771 b'copy': b'source0',
772 b'copyrev': b'b' * 40,
772 b'copyrev': b'b' * 40,
773 }
773 }
774 stored1 = b''.join([
774 stored1 = b''.join([
775 b'\x01\ncopy: source0\n',
775 b'\x01\ncopy: source0\n',
776 b'copyrev: %s\n' % (b'b' * 40),
776 b'copyrev: %s\n' % (b'b' * 40),
777 b'\x01\n\x01\nbar',
777 b'\x01\n\x01\nbar',
778 ])
778 ])
779
779
780 f = self._makefilefn()
780 f = self._makefilefn()
781 with self._maketransactionfn() as tr:
781 with self._maketransactionfn() as tr:
782 node0 = f.add(fulltext0, {}, tr, 0, nullid, nullid)
782 node0 = f.add(fulltext0, {}, tr, 0, nullid, nullid)
783 node1 = f.add(fulltext1, meta1, tr, 1, nullid, nullid)
783 node1 = f.add(fulltext1, meta1, tr, 1, nullid, nullid)
784
784
785 self.assertEqual(f.rawsize(0), len(stored0))
785 self.assertEqual(f.rawsize(0), len(stored0))
786 self.assertEqual(f.rawsize(1), len(stored1))
786 self.assertEqual(f.rawsize(1), len(stored1))
787
787
788 # TODO this is buggy.
788 # TODO this is buggy.
789 self.assertEqual(f.size(0), len(fulltext0) + 4)
789 self.assertEqual(f.size(0), len(fulltext0) + 4)
790
790
791 self.assertEqual(f.size(1), len(fulltext1))
791 self.assertEqual(f.size(1), len(fulltext1))
792
792
793 self.assertEqual(f.revision(node0), stored0)
793 self.assertEqual(f.revision(node0), stored0)
794 self.assertEqual(f.revision(node0, raw=True), stored0)
794 self.assertEqual(f.revision(node0, raw=True), stored0)
795
795
796 self.assertEqual(f.revision(node1), stored1)
796 self.assertEqual(f.revision(node1), stored1)
797 self.assertEqual(f.revision(node1, raw=True), stored1)
797 self.assertEqual(f.revision(node1, raw=True), stored1)
798
798
799 self.assertEqual(f.read(node0), fulltext0)
799 self.assertEqual(f.read(node0), fulltext0)
800 self.assertEqual(f.read(node1), fulltext1)
800 self.assertEqual(f.read(node1), fulltext1)
801
801
802 self.assertFalse(f.cmp(node0, fulltext0))
802 self.assertFalse(f.cmp(node0, fulltext0))
803 self.assertTrue(f.cmp(node0, stored0))
803 self.assertTrue(f.cmp(node0, stored0))
804
804
805 self.assertFalse(f.cmp(node1, fulltext1))
805 self.assertFalse(f.cmp(node1, fulltext1))
806 self.assertTrue(f.cmp(node1, stored0))
806 self.assertTrue(f.cmp(node1, stored0))
807
807
808 def testcensored(self):
808 def testcensored(self):
809 f = self._makefilefn()
809 f = self._makefilefn()
810
810
811 stored1 = revlog.packmeta({
811 stored1 = revlog.packmeta({
812 b'censored': b'tombstone',
812 b'censored': b'tombstone',
813 }, b'')
813 }, b'')
814
814
815 # TODO tests are incomplete because we need the node to be
815 # TODO tests are incomplete because we need the node to be
816 # different due to presence of censor metadata. But we can't
816 # different due to presence of censor metadata. But we can't
817 # do this with addrevision().
817 # do this with addrevision().
818 with self._maketransactionfn() as tr:
818 with self._maketransactionfn() as tr:
819 node0 = f.add(b'foo', None, tr, 0, nullid, nullid)
819 node0 = f.add(b'foo', None, tr, 0, nullid, nullid)
820 f.addrevision(stored1, tr, 1, node0, nullid,
820 f.addrevision(stored1, tr, 1, node0, nullid,
821 flags=revlog.REVIDX_ISCENSORED)
821 flags=revlog.REVIDX_ISCENSORED)
822
822
823 self.assertEqual(f.flags(1), revlog.REVIDX_ISCENSORED)
823 self.assertEqual(f.flags(1), revlog.REVIDX_ISCENSORED)
824 self.assertTrue(f.iscensored(1))
824 self.assertTrue(f.iscensored(1))
825
825
826 self.assertEqual(f.revision(1), stored1)
826 self.assertEqual(f.revision(1), stored1)
827 self.assertEqual(f.revision(1, raw=True), stored1)
827 self.assertEqual(f.revision(1, raw=True), stored1)
828
828
829 self.assertEqual(f.read(1), b'')
829 self.assertEqual(f.read(1), b'')
830
830
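
The hash-check failures in the data tests above (and the bad-node/unknown-flag cases in the mutation tests below) are now asserted as ``error.StorageError`` rather than a revlog-specific exception, so callers that want to stay backend-agnostic can catch the broader type. A minimal sketch of that calling pattern; the helper function is illustrative only:

    from mercurial import error

    def hashmatches(f, text, node):
        # Illustrative: True if ``text`` hashes to ``node`` in file store ``f``.
        try:
            f.checkhash(text, node)    # raises on mismatch, per the tests above
        except error.StorageError:
            return False
        return True
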
831 class ifilemutationtests(basetestcase):
832     """Generic tests for the ifilemutation interface.
833
834     All file storage backends that support writing should conform to this
835     interface.
836
837     Use ``makeifilemutationtests()`` to create an instance of this type.
838     """
839     def testaddnoop(self):
840         f = self._makefilefn()
841         with self._maketransactionfn() as tr:
842             node0 = f.add(b'foo', None, tr, 0, nullid, nullid)
843             node1 = f.add(b'foo', None, tr, 0, nullid, nullid)
844             # Varying by linkrev shouldn't impact hash.
845             node2 = f.add(b'foo', None, tr, 1, nullid, nullid)
846
847         self.assertEqual(node1, node0)
848         self.assertEqual(node2, node0)
849         self.assertEqual(len(f), 1)
850
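The "Varying by linkrev shouldn't impact hash" comment in ``testaddnoop`` holds because a node is derived only from the revision text and its (sorted) parents; the link revision never enters the hash. A rough sketch of that scheme, assuming the conventional SHA-1 construction used by revlogs (the real helper lives in ``mercurial.revlog`` and is not reproduced here):

import hashlib

nullid = b'\0' * 20

def nodeid_sketch(text, p1, p2):
    # Parents are sorted so (p1, p2) and (p2, p1) hash identically,
    # and there is deliberately no linkrev argument at all.
    s = hashlib.sha1(min(p1, p2))
    s.update(max(p1, p2))
    s.update(text)
    return s.digest()

assert (nodeid_sketch(b'foo', nullid, b'\x01' * 20)
        == nodeid_sketch(b'foo', b'\x01' * 20, nullid))

This is why node0, node1 and node2 above collapse into a single stored revision.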
851     def testaddrevisionbadnode(self):
852         f = self._makefilefn()
853         with self._maketransactionfn() as tr:
854             # Adding a revision with bad node value fails.
855             with self.assertRaises(error.RevlogError):
855             with self.assertRaises(error.StorageError):
856                 f.addrevision(b'foo', tr, 0, nullid, nullid, node=b'\x01' * 20)
857
858     def testaddrevisionunknownflag(self):
859         f = self._makefilefn()
860         with self._maketransactionfn() as tr:
861             for i in range(15, 0, -1):
862                 if (1 << i) & ~revlog.REVIDX_KNOWN_FLAGS:
863                     flags = 1 << i
864                     break
865
866             with self.assertRaises(error.RevlogError):
866             with self.assertRaises(error.StorageError):
867                 f.addrevision(b'foo', tr, 0, nullid, nullid, flags=flags)
868
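The bit scan in ``testaddrevisionunknownflag`` simply looks for the highest flag bit that ``REVIDX_KNOWN_FLAGS`` does not claim, so the storage can be handed a flag it cannot possibly support. A hedged illustration with a hypothetical flag layout (the real constants live in ``mercurial.revlog`` and may differ):

# Hypothetical values, for illustration only.
REVIDX_ISCENSORED = 1 << 15
REVIDX_ELLIPSIS = 1 << 14
REVIDX_EXTSTORED = 1 << 13
REVIDX_KNOWN_FLAGS = REVIDX_ISCENSORED | REVIDX_ELLIPSIS | REVIDX_EXTSTORED

for i in range(15, 0, -1):
    if (1 << i) & ~REVIDX_KNOWN_FLAGS:
        flags = 1 << i
        break

assert flags == 1 << 12  # first bit the assumed layout leaves unclaimed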
869     def testaddgroupsimple(self):
870         f = self._makefilefn()
871
872         callbackargs = []
873         def cb(*args, **kwargs):
874             callbackargs.append((args, kwargs))
875
876         def linkmapper(node):
877             return 0
878
879         with self._maketransactionfn() as tr:
880             nodes = f.addgroup([], None, tr, addrevisioncb=cb)
881
882         self.assertEqual(nodes, [])
883         self.assertEqual(callbackargs, [])
884         self.assertEqual(len(f), 0)
885
886         fulltext0 = b'foo'
887         delta0 = mdiff.trivialdiffheader(len(fulltext0)) + fulltext0
888
889         deltas = [
890             (b'\x01' * 20, nullid, nullid, nullid, nullid, delta0, 0),
891         ]
892
893         with self._maketransactionfn() as tr:
894             with self.assertRaises(error.RevlogError):
894             with self.assertRaises(error.StorageError):
895                 f.addgroup(deltas, linkmapper, tr, addrevisioncb=cb)
896
897             node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
898
899         f = self._makefilefn()
900
901         deltas = [
902             (node0, nullid, nullid, nullid, nullid, delta0, 0),
903         ]
904
905         with self._maketransactionfn() as tr:
906             nodes = f.addgroup(deltas, linkmapper, tr, addrevisioncb=cb)
907
908         self.assertEqual(nodes, [
909             b'\x49\xd8\xcb\xb1\x5c\xe2\x57\x92\x04\x47'
910             b'\x00\x6b\x46\x97\x8b\x7a\xf9\x80\xa9\x79'])
911
912         self.assertEqual(len(callbackargs), 1)
913         self.assertEqual(callbackargs[0][0][1], nodes[0])
914
915         self.assertEqual(list(f.revs()), [0])
916         self.assertEqual(f.rev(nodes[0]), 0)
917         self.assertEqual(f.node(0), nodes[0])
918
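Two pieces of ``testaddgroupsimple`` are worth unpacking. Each entry in ``deltas`` appears to follow a seven-field layout of ``(node, p1, p2, linknode, deltabase, delta, flags)``, and ``mdiff.trivialdiffheader`` prefixes a full snapshot with a hunk header that replaces an empty region at offset 0, so the "delta" simply carries the whole text. A sketch under those assumptions (field order and header format are inferred, not quoted from the interface definition):

import struct

nullid = b'\0' * 20

def trivialdiffheader_sketch(length):
    # bdiff-style hunk header: replace bytes [0, 0) of the base with
    # `length` new bytes, i.e. the delta is really a full snapshot.
    return struct.pack('>lll', 0, 0, length)

fulltext = b'foo'
delta = trivialdiffheader_sketch(len(fulltext)) + fulltext

# Assumed field order: node, p1, p2, linknode, deltabase, delta, flags.
entry = (b'\x01' * 20, nullid, nullid, nullid, nullid, delta, 0)

The first addgroup() call is expected to fail precisely because that hard-coded node does not match what the supplied text and parents hash to.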
919     def testaddgroupmultiple(self):
920         f = self._makefilefn()
921
922         fulltexts = [
923             b'foo',
924             b'bar',
925             b'x' * 1024,
926         ]
927
928         nodes = []
929         with self._maketransactionfn() as tr:
930             for fulltext in fulltexts:
931                 nodes.append(f.add(fulltext, None, tr, 0, nullid, nullid))
932
933         f = self._makefilefn()
934         deltas = []
935         for i, fulltext in enumerate(fulltexts):
936             delta = mdiff.trivialdiffheader(len(fulltext)) + fulltext
937
938             deltas.append((nodes[i], nullid, nullid, nullid, nullid, delta, 0))
939
940         with self._maketransactionfn() as tr:
941             self.assertEqual(f.addgroup(deltas, lambda x: 0, tr), nodes)
942
943         self.assertEqual(len(f), len(deltas))
944         self.assertEqual(list(f.revs()), [0, 1, 2])
945         self.assertEqual(f.rev(nodes[0]), 0)
946         self.assertEqual(f.rev(nodes[1]), 1)
947         self.assertEqual(f.rev(nodes[2]), 2)
948         self.assertEqual(f.node(0), nodes[0])
949         self.assertEqual(f.node(1), nodes[1])
950         self.assertEqual(f.node(2), nodes[2])
951
952 def makeifileindextests(makefilefn, maketransactionfn):
953     """Create a unittest.TestCase class suitable for testing file storage.
954
955     ``makefilefn`` is a callable which receives the test case as an
956     argument and returns an object implementing the ``ifilestorage`` interface.
957
958     ``maketransactionfn`` is a callable which receives the test case as an
959     argument and returns a transaction object.
960
961     Returns a type that is a ``unittest.TestCase`` that can be used for
962     testing the object implementing the file storage interface. Simply
963     assign the returned value to a module-level attribute and a test loader
964     should find and run it automatically.
965     """
966     d = {
967         r'_makefilefn': makefilefn,
968         r'_maketransactionfn': maketransactionfn,
969     }
970     return type(r'ifileindextests', (ifileindextests,), d)
971
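As the docstring above describes, a backend's test module only needs to call the factory with its own constructors and bind the result to a module-level name so a test loader discovers it. A hypothetical wiring, assuming this module is importable as ``mercurial.testing.storage`` (the stub constructors below are placeholders a real backend would replace):

import unittest

from mercurial.testing import storage


def makefilefn(self):
    # A real backend would return its ifilestorage implementation here.
    raise NotImplementedError('backend-specific file constructor')


def maketransactionfn(self):
    # A real backend would return a transaction usable as a context manager.
    raise NotImplementedError('backend-specific transaction constructor')


# The module-level binding is what unittest discovery picks up.
ifileindextests = storage.makeifileindextests(makefilefn, maketransactionfn)

if __name__ == '__main__':
    unittest.main()

The ``makeifiledatatests`` and ``makeifilemutationtests`` factories below are wired in exactly the same way.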
972 def makeifiledatatests(makefilefn, maketransactionfn):
973     d = {
974         r'_makefilefn': makefilefn,
975         r'_maketransactionfn': maketransactionfn,
976     }
977     return type(r'ifiledatatests', (ifiledatatests,), d)
978
979 def makeifilemutationtests(makefilefn, maketransactionfn):
980     d = {
981         r'_makefilefn': makefilefn,
982         r'_maketransactionfn': maketransactionfn,
983     }
984     return type(r'ifilemutationtests', (ifilemutationtests,), d)