##// END OF EJS Templates
error: introduce StorageError...
Gregory Szorc -
r39812:cb65d4b7 default
parent child Browse files
Show More
@@ -1,329 +1,336
1 1 # error.py - Mercurial exceptions
2 2 #
3 3 # Copyright 2005-2008 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """Mercurial exceptions.
9 9
10 10 This allows us to catch exceptions at higher levels without forcing
11 11 imports.
12 12 """
13 13
14 14 from __future__ import absolute_import
15 15
16 16 # Do not import anything but pycompat here, please
17 17 from . import pycompat
18 18
def _tobytes(exc):
    """Byte-stringify exception in the same way as BaseException.__str__()"""
    args = exc.args
    # No arguments: empty message, matching str(BaseException()).
    if not args:
        return b''
    # A single argument is rendered bare, without parentheses.
    if len(args) == 1:
        return pycompat.bytestr(args[0])
    # Multiple arguments render like a tuple repr: ('a', 'b', ...).
    quoted = (b"'%s'" % pycompat.bytestr(a) for a in args)
    return b'(%s)' % b', '.join(quoted)
26 26
class Hint(object):
    """Mix-in to provide a hint of an error

    This should come first in the inheritance list to consume a hint and
    pass remaining arguments to the exception class.
    """

    def __init__(self, *args, **kw):
        # Pop the hint so the underlying exception never sees it in kwargs.
        self.hint = kw.pop(r'hint', None)
        super(Hint, self).__init__(*args, **kw)
36 36
class StorageError(Hint, Exception):
    """Raised when an error occurs in a storage layer.

    Usually subclassed by a storage-specific exception.
    """
    __bytes__ = _tobytes

class RevlogError(StorageError):
    """Raised when an error occurs in revlog-based storage.

    NOTE(review): ``__bytes__`` is inherited from StorageError; the previous
    re-assignment of ``_tobytes`` here was redundant (same function object
    resolved through the MRO) and has been dropped.
    """
39 46
40 47 class FilteredIndexError(IndexError):
41 48 __bytes__ = _tobytes
42 49
class LookupError(RevlogError, KeyError):
    """Raised when a node cannot be resolved in storage."""

    def __init__(self, name, index, message):
        self.name = name
        self.index = index
        # this can't be called 'message' because at least some installs of
        # Python 2.6+ complain about the 'message' property being deprecated
        self.lookupmessage = message
        # Display a short hash when given a full 20-byte binary node.
        displayname = name
        if isinstance(name, bytes) and len(name) == 20:
            from .node import short
            displayname = short(name)
        RevlogError.__init__(self, '%s@%s: %s' % (index, displayname, message))

    def __bytes__(self):
        return RevlogError.__bytes__(self)

    def __str__(self):
        return RevlogError.__str__(self)
60 67
61 68 class AmbiguousPrefixLookupError(LookupError):
62 69 pass
63 70
64 71 class FilteredLookupError(LookupError):
65 72 pass
66 73
67 74 class ManifestLookupError(LookupError):
68 75 pass
69 76
70 77 class CommandError(Exception):
71 78 """Exception raised on errors in parsing the command line."""
72 79 __bytes__ = _tobytes
73 80
74 81 class InterventionRequired(Hint, Exception):
75 82 """Exception raised when a command requires human intervention."""
76 83 __bytes__ = _tobytes
77 84
78 85 class Abort(Hint, Exception):
79 86 """Raised if a command needs to print an error and exit."""
80 87 __bytes__ = _tobytes
81 88
82 89 class HookLoadError(Abort):
83 90 """raised when loading a hook fails, aborting an operation
84 91
85 92 Exists to allow more specialized catching."""
86 93
87 94 class HookAbort(Abort):
88 95 """raised when a validation hook fails, aborting an operation
89 96
90 97 Exists to allow more specialized catching."""
91 98
92 99 class ConfigError(Abort):
93 100 """Exception raised when parsing config files"""
94 101
95 102 class UpdateAbort(Abort):
96 103 """Raised when an update is aborted for destination issue"""
97 104
98 105 class MergeDestAbort(Abort):
99 106 """Raised when an update is aborted for destination issues"""
100 107
101 108 class NoMergeDestAbort(MergeDestAbort):
102 109 """Raised when an update is aborted because there is nothing to merge"""
103 110
104 111 class ManyMergeDestAbort(MergeDestAbort):
105 112 """Raised when an update is aborted because destination is ambiguous"""
106 113
class ResponseExpected(Abort):
    """Raised when an EOF is received for a prompt"""

    def __init__(self):
        from .i18n import _
        message = _('response expected')
        Abort.__init__(self, message)
112 119
113 120 class OutOfBandError(Hint, Exception):
114 121 """Exception raised when a remote repo reports failure"""
115 122 __bytes__ = _tobytes
116 123
117 124 class ParseError(Hint, Exception):
118 125 """Raised when parsing config files and {rev,file}sets (msg[, pos])"""
119 126 __bytes__ = _tobytes
120 127
121 128 class PatchError(Exception):
122 129 __bytes__ = _tobytes
123 130
124 131 class UnknownIdentifier(ParseError):
125 132 """Exception raised when a {rev,file}set references an unknown identifier"""
126 133
127 134 def __init__(self, function, symbols):
128 135 from .i18n import _
129 136 ParseError.__init__(self, _("unknown identifier: %s") % function)
130 137 self.function = function
131 138 self.symbols = symbols
132 139
133 140 class RepoError(Hint, Exception):
134 141 __bytes__ = _tobytes
135 142
136 143 class RepoLookupError(RepoError):
137 144 pass
138 145
139 146 class FilteredRepoLookupError(RepoLookupError):
140 147 pass
141 148
142 149 class CapabilityError(RepoError):
143 150 pass
144 151
145 152 class RequirementError(RepoError):
146 153 """Exception raised if .hg/requires has an unknown entry."""
147 154
class StdioError(IOError):
    """Raised if I/O to stdout or stderr fails"""

    def __init__(self, err):
        # Mirror the wrapped OS error's errno/strerror on this exception.
        IOError.__init__(self, err.errno, err.strerror)

    # no __bytes__() because error message is derived from the standard IOError
155 162
class UnsupportedMergeRecords(Abort):
    """Raised when the merge state contains record types we cannot parse."""

    def __init__(self, recordtypes):
        from .i18n import _
        self.recordtypes = sorted(recordtypes)
        joined = ' '.join(self.recordtypes)
        Abort.__init__(
            self, _('unsupported merge state records: %s') % joined,
            hint=_('see https://mercurial-scm.org/wiki/MergeStateRecords for '
                   'more information'))
165 172
166 173 class UnknownVersion(Abort):
167 174 """generic exception for aborting from an encounter with an unknown version
168 175 """
169 176
170 177 def __init__(self, msg, hint=None, version=None):
171 178 self.version = version
172 179 super(UnknownVersion, self).__init__(msg, hint=hint)
173 180
class LockError(IOError):
    """Base class for errors raised while acquiring a repository lock."""

    def __init__(self, errno, strerror, filename, desc):
        IOError.__init__(self, errno, strerror, filename)
        # Human-readable description of what the lock protects.
        self.desc = desc

    # no __bytes__() because error message is derived from the standard IOError
180 187
class LockHeld(LockError):
    """Raised when the lock is already held by another locker."""

    def __init__(self, errno, filename, desc, locker):
        LockError.__init__(self, errno, 'Lock held', filename, desc)
        # Identity of whoever currently holds the lock.
        self.locker = locker
185 192
186 193 class LockUnavailable(LockError):
187 194 pass
188 195
189 196 # LockError is for errors while acquiring the lock -- this is unrelated
190 197 class LockInheritanceContractViolation(RuntimeError):
191 198 __bytes__ = _tobytes
192 199
193 200 class ResponseError(Exception):
194 201 """Raised to print an error with part of output and exit."""
195 202 __bytes__ = _tobytes
196 203
197 204 class UnknownCommand(Exception):
198 205 """Exception raised if command is not in the command table."""
199 206 __bytes__ = _tobytes
200 207
201 208 class AmbiguousCommand(Exception):
202 209 """Exception raised if command shortcut matches more than one command."""
203 210 __bytes__ = _tobytes
204 211
205 212 # derived from KeyboardInterrupt to simplify some breakout code
206 213 class SignalInterrupt(KeyboardInterrupt):
207 214 """Exception raised on SIGTERM and SIGHUP."""
208 215
209 216 class SignatureError(Exception):
210 217 __bytes__ = _tobytes
211 218
212 219 class PushRaced(RuntimeError):
213 220 """An exception raised during unbundling that indicate a push race"""
214 221 __bytes__ = _tobytes
215 222
class ProgrammingError(Hint, RuntimeError):
    """Raised if a mercurial (core or extension) developer made a mistake"""

    def __init__(self, msg, *args, **kwargs):
        # On Python 3, turn the message back into a string since this is
        # an internal-only error that won't be printed except in a
        # stack traces.
        super(ProgrammingError, self).__init__(pycompat.sysstr(msg),
                                               *args, **kwargs)

    __bytes__ = _tobytes
227 234
228 235 class WdirUnsupported(Exception):
229 236 """An exception which is raised when 'wdir()' is not supported"""
230 237 __bytes__ = _tobytes
231 238
232 239 # bundle2 related errors
233 240 class BundleValueError(ValueError):
234 241 """error raised when bundle2 cannot be processed"""
235 242 __bytes__ = _tobytes
236 243
class BundleUnknownFeatureError(BundleValueError):
    """Raised when a bundle2 part advertises parttypes/parameters we lack.

    ``parttype`` is the offending part type (``None`` for a stream-level
    parameter), ``params`` the unsupported parameter names and ``values``
    their corresponding values, when any.
    """

    def __init__(self, parttype=None, params=(), values=()):
        self.parttype = parttype
        self.params = params
        self.values = values
        if self.parttype is None:
            msg = 'Stream Parameter'
        else:
            msg = parttype
        entries = self.params
        if self.params and self.values:
            assert len(self.params) == len(self.values)
            entries = []
            for idx, par in enumerate(self.params):
                val = self.values[idx]
                if val is None:
                    # Parameter advertised without a value: show only its
                    # name. (The previous code appended ``val`` — i.e.
                    # ``None`` — which made the join() below raise
                    # TypeError.)
                    entries.append(par)
                else:
                    entries.append("%s=%r" % (par, pycompat.maybebytestr(val)))
        if entries:
            msg = '%s - %s' % (msg, ', '.join(entries))
        ValueError.__init__(self, msg)
259 266
260 267 class ReadOnlyPartError(RuntimeError):
261 268 """error raised when code tries to alter a part being generated"""
262 269 __bytes__ = _tobytes
263 270
class PushkeyFailed(Abort):
    """error raised when a pushkey part failed to update a value"""

    def __init__(self, partid, namespace=None, key=None, new=None, old=None,
                 ret=None):
        # no i18n expected to be processed into a better message
        message = 'failed to update value for "%s/%s"' % (namespace, key)
        self.partid = partid
        self.namespace = namespace
        self.key = key
        self.new = new
        self.old = old
        self.ret = ret
        Abort.__init__(self, message)
278 285
class CensoredNodeError(RevlogError):
    """error raised when content verification fails on a censored node

    Also contains the tombstone data substituted for the uncensored data.
    """

    def __init__(self, filename, node, tombstone):
        from .node import short
        self.tombstone = tombstone
        RevlogError.__init__(self, '%s:%s' % (filename, short(node)))
289 296
290 297 class CensoredBaseError(RevlogError):
291 298 """error raised when a delta is rejected because its base is censored
292 299
293 300 A delta based on a censored revision must be formed as single patch
294 301 operation which replaces the entire base with new content. This ensures
295 302 the delta may be applied by clones which have not censored the base.
296 303 """
297 304
298 305 class InvalidBundleSpecification(Exception):
299 306 """error raised when a bundle specification is invalid.
300 307
301 308 This is used for syntax errors as opposed to support errors.
302 309 """
303 310 __bytes__ = _tobytes
304 311
305 312 class UnsupportedBundleSpecification(Exception):
306 313 """error raised when a bundle specification is not supported."""
307 314 __bytes__ = _tobytes
308 315
309 316 class CorruptedState(Exception):
310 317 """error raised when a command is not able to read its state from file"""
311 318 __bytes__ = _tobytes
312 319
313 320 class PeerTransportError(Abort):
314 321 """Transport-level I/O error when communicating with a peer repo."""
315 322
316 323 class InMemoryMergeConflictsError(Exception):
317 324 """Exception raised when merge conflicts arose during an in-memory merge."""
318 325 __bytes__ = _tobytes
319 326
class WireprotoCommandError(Exception):
    """Represents an error during execution of a wire protocol command.

    Should only be thrown by wire protocol version 2 commands.

    The error is a formatter string and an optional iterable of arguments.
    """

    def __init__(self, message, args=None):
        self.messageargs = args
        self.message = message
@@ -1,1585 +1,1585
1 1 # repository.py - Interfaces and base classes for repositories and peers.
2 2 #
3 3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from .i18n import _
11 11 from . import (
12 12 error,
13 13 )
14 14 from .utils import (
15 15 interfaceutil,
16 16 )
17 17
18 18 # When narrowing is finalized and no longer subject to format changes,
19 19 # we should move this to just "narrow" or similar.
20 20 NARROW_REQUIREMENT = 'narrowhg-experimental'
21 21
22 22 class ipeerconnection(interfaceutil.Interface):
23 23 """Represents a "connection" to a repository.
24 24
25 25 This is the base interface for representing a connection to a repository.
26 26 It holds basic properties and methods applicable to all peer types.
27 27
28 28 This is not a complete interface definition and should not be used
29 29 outside of this module.
30 30 """
31 31 ui = interfaceutil.Attribute("""ui.ui instance""")
32 32
33 33 def url():
34 34 """Returns a URL string representing this peer.
35 35
36 36 Currently, implementations expose the raw URL used to construct the
37 37 instance. It may contain credentials as part of the URL. The
38 38 expectations of the value aren't well-defined and this could lead to
39 39 data leakage.
40 40
41 41 TODO audit/clean consumers and more clearly define the contents of this
42 42 value.
43 43 """
44 44
45 45 def local():
46 46 """Returns a local repository instance.
47 47
48 48 If the peer represents a local repository, returns an object that
49 49 can be used to interface with it. Otherwise returns ``None``.
50 50 """
51 51
52 52 def peer():
53 53 """Returns an object conforming to this interface.
54 54
55 55 Most implementations will ``return self``.
56 56 """
57 57
58 58 def canpush():
59 59 """Returns a boolean indicating if this peer can be pushed to."""
60 60
61 61 def close():
62 62 """Close the connection to this peer.
63 63
64 64 This is called when the peer will no longer be used. Resources
65 65 associated with the peer should be cleaned up.
66 66 """
67 67
68 68 class ipeercapabilities(interfaceutil.Interface):
69 69 """Peer sub-interface related to capabilities."""
70 70
71 71 def capable(name):
72 72 """Determine support for a named capability.
73 73
74 74 Returns ``False`` if capability not supported.
75 75
76 76 Returns ``True`` if boolean capability is supported. Returns a string
77 77 if capability support is non-boolean.
78 78
79 79 Capability strings may or may not map to wire protocol capabilities.
80 80 """
81 81
82 82 def requirecap(name, purpose):
83 83 """Require a capability to be present.
84 84
85 85 Raises a ``CapabilityError`` if the capability isn't present.
86 86 """
87 87
88 88 class ipeercommands(interfaceutil.Interface):
89 89 """Client-side interface for communicating over the wire protocol.
90 90
91 91 This interface is used as a gateway to the Mercurial wire protocol.
92 92 methods commonly call wire protocol commands of the same name.
93 93 """
94 94
95 95 def branchmap():
96 96 """Obtain heads in named branches.
97 97
98 98 Returns a dict mapping branch name to an iterable of nodes that are
99 99 heads on that branch.
100 100 """
101 101
102 102 def capabilities():
103 103 """Obtain capabilities of the peer.
104 104
105 105 Returns a set of string capabilities.
106 106 """
107 107
108 108 def clonebundles():
109 109 """Obtains the clone bundles manifest for the repo.
110 110
111 111 Returns the manifest as unparsed bytes.
112 112 """
113 113
114 114 def debugwireargs(one, two, three=None, four=None, five=None):
115 115 """Used to facilitate debugging of arguments passed over the wire."""
116 116
117 117 def getbundle(source, **kwargs):
118 118 """Obtain remote repository data as a bundle.
119 119
120 120 This command is how the bulk of repository data is transferred from
121 121 the peer to the local repository
122 122
123 123 Returns a generator of bundle data.
124 124 """
125 125
126 126 def heads():
127 127 """Determine all known head revisions in the peer.
128 128
129 129 Returns an iterable of binary nodes.
130 130 """
131 131
132 132 def known(nodes):
133 133 """Determine whether multiple nodes are known.
134 134
135 135 Accepts an iterable of nodes whose presence to check for.
136 136
137 137 Returns an iterable of booleans indicating of the corresponding node
138 138 at that index is known to the peer.
139 139 """
140 140
141 141 def listkeys(namespace):
142 142 """Obtain all keys in a pushkey namespace.
143 143
144 144 Returns an iterable of key names.
145 145 """
146 146
147 147 def lookup(key):
148 148 """Resolve a value to a known revision.
149 149
150 150 Returns a binary node of the resolved revision on success.
151 151 """
152 152
153 153 def pushkey(namespace, key, old, new):
154 154 """Set a value using the ``pushkey`` protocol.
155 155
156 156 Arguments correspond to the pushkey namespace and key to operate on and
157 157 the old and new values for that key.
158 158
159 159 Returns a string with the peer result. The value inside varies by the
160 160 namespace.
161 161 """
162 162
163 163 def stream_out():
164 164 """Obtain streaming clone data.
165 165
166 166 Successful result should be a generator of data chunks.
167 167 """
168 168
169 169 def unbundle(bundle, heads, url):
170 170 """Transfer repository data to the peer.
171 171
172 172 This is how the bulk of data during a push is transferred.
173 173
174 174 Returns the integer number of heads added to the peer.
175 175 """
176 176
177 177 class ipeerlegacycommands(interfaceutil.Interface):
178 178 """Interface for implementing support for legacy wire protocol commands.
179 179
180 180 Wire protocol commands transition to legacy status when they are no longer
181 181 used by modern clients. To facilitate identifying which commands are
182 182 legacy, the interfaces are split.
183 183 """
184 184
185 185 def between(pairs):
186 186 """Obtain nodes between pairs of nodes.
187 187
188 188 ``pairs`` is an iterable of node pairs.
189 189
190 190 Returns an iterable of iterables of nodes corresponding to each
191 191 requested pair.
192 192 """
193 193
194 194 def branches(nodes):
195 195 """Obtain ancestor changesets of specific nodes back to a branch point.
196 196
197 197 For each requested node, the peer finds the first ancestor node that is
198 198 a DAG root or is a merge.
199 199
200 200 Returns an iterable of iterables with the resolved values for each node.
201 201 """
202 202
203 203 def changegroup(nodes, source):
204 204 """Obtain a changegroup with data for descendants of specified nodes."""
205 205
206 206 def changegroupsubset(bases, heads, source):
207 207 pass
208 208
209 209 class ipeercommandexecutor(interfaceutil.Interface):
210 210 """Represents a mechanism to execute remote commands.
211 211
212 212 This is the primary interface for requesting that wire protocol commands
213 213 be executed. Instances of this interface are active in a context manager
214 214 and have a well-defined lifetime. When the context manager exits, all
215 215 outstanding requests are waited on.
216 216 """
217 217
218 218 def callcommand(name, args):
219 219 """Request that a named command be executed.
220 220
221 221 Receives the command name and a dictionary of command arguments.
222 222
223 223 Returns a ``concurrent.futures.Future`` that will resolve to the
224 224 result of that command request. That exact value is left up to
225 225 the implementation and possibly varies by command.
226 226
227 227 Not all commands can coexist with other commands in an executor
228 228 instance: it depends on the underlying wire protocol transport being
229 229 used and the command itself.
230 230
231 231 Implementations MAY call ``sendcommands()`` automatically if the
232 232 requested command can not coexist with other commands in this executor.
233 233
234 234 Implementations MAY call ``sendcommands()`` automatically when the
235 235 future's ``result()`` is called. So, consumers using multiple
236 236 commands with an executor MUST ensure that ``result()`` is not called
237 237 until all command requests have been issued.
238 238 """
239 239
240 240 def sendcommands():
241 241 """Trigger submission of queued command requests.
242 242
243 243 Not all transports submit commands as soon as they are requested to
244 244 run. When called, this method forces queued command requests to be
245 245 issued. It will no-op if all commands have already been sent.
246 246
247 247 When called, no more new commands may be issued with this executor.
248 248 """
249 249
250 250 def close():
251 251 """Signal that this command request is finished.
252 252
253 253 When called, no more new commands may be issued. All outstanding
254 254 commands that have previously been issued are waited on before
255 255 returning. This not only includes waiting for the futures to resolve,
256 256 but also waiting for all response data to arrive. In other words,
257 257 calling this waits for all on-wire state for issued command requests
258 258 to finish.
259 259
260 260 When used as a context manager, this method is called when exiting the
261 261 context manager.
262 262
263 263 This method may call ``sendcommands()`` if there are buffered commands.
264 264 """
265 265
266 266 class ipeerrequests(interfaceutil.Interface):
267 267 """Interface for executing commands on a peer."""
268 268
269 269 def commandexecutor():
270 270 """A context manager that resolves to an ipeercommandexecutor.
271 271
272 272 The object this resolves to can be used to issue command requests
273 273 to the peer.
274 274
275 275 Callers should call its ``callcommand`` method to issue command
276 276 requests.
277 277
278 278 A new executor should be obtained for each distinct set of commands
279 279 (possibly just a single command) that the consumer wants to execute
280 280 as part of a single operation or round trip. This is because some
281 281 peers are half-duplex and/or don't support persistent connections.
282 282 e.g. in the case of HTTP peers, commands sent to an executor represent
283 283 a single HTTP request. While some peers may support multiple command
284 284 sends over the wire per executor, consumers need to code to the least
285 285 capable peer. So it should be assumed that command executors buffer
286 286 called commands until they are told to send them and that each
287 287 command executor could result in a new connection or wire-level request
288 288 being issued.
289 289 """
290 290
291 291 class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
292 292 """Unified interface for peer repositories.
293 293
294 294 All peer instances must conform to this interface.
295 295 """
296 296
@interfaceutil.implementer(ipeerbase)
class peer(object):
    """Base class for peer repositories."""

    def capable(self, name):
        # Boolean capability: present verbatim in the capability set.
        caps = self.capabilities()
        if name in caps:
            return True

        # Non-boolean capability: advertised as "name=value".
        prefix = '%s=' % name
        for cap in caps:
            if cap.startswith(prefix):
                return cap[len(prefix):]

        return False

    def requirecap(self, name, purpose):
        if not self.capable(name):
            raise error.CapabilityError(
                _('cannot %s; remote repository does not support the %r '
                  'capability') % (purpose, name))
320 320
321 321 class irevisiondelta(interfaceutil.Interface):
322 322 """Represents a delta between one revision and another.
323 323
324 324 Instances convey enough information to allow a revision to be exchanged
325 325 with another repository.
326 326
327 327 Instances represent the fulltext revision data or a delta against
328 328 another revision. Therefore the ``revision`` and ``delta`` attributes
329 329 are mutually exclusive.
330 330
331 331 Typically used for changegroup generation.
332 332 """
333 333
334 334 node = interfaceutil.Attribute(
335 335 """20 byte node of this revision.""")
336 336
337 337 p1node = interfaceutil.Attribute(
338 338 """20 byte node of 1st parent of this revision.""")
339 339
340 340 p2node = interfaceutil.Attribute(
341 341 """20 byte node of 2nd parent of this revision.""")
342 342
343 343 linknode = interfaceutil.Attribute(
344 344 """20 byte node of the changelog revision this node is linked to.""")
345 345
346 346 flags = interfaceutil.Attribute(
347 347 """2 bytes of integer flags that apply to this revision.""")
348 348
349 349 basenode = interfaceutil.Attribute(
350 350 """20 byte node of the revision this data is a delta against.
351 351
352 352 ``nullid`` indicates that the revision is a full revision and not
353 353 a delta.
354 354 """)
355 355
356 356 baserevisionsize = interfaceutil.Attribute(
357 357 """Size of base revision this delta is against.
358 358
359 359 May be ``None`` if ``basenode`` is ``nullid``.
360 360 """)
361 361
362 362 revision = interfaceutil.Attribute(
363 363 """Raw fulltext of revision data for this node.""")
364 364
365 365 delta = interfaceutil.Attribute(
366 366 """Delta between ``basenode`` and ``node``.
367 367
368 368 Stored in the bdiff delta format.
369 369 """)
370 370
371 371 class irevisiondeltarequest(interfaceutil.Interface):
372 372 """Represents a request to generate an ``irevisiondelta``."""
373 373
374 374 node = interfaceutil.Attribute(
375 375 """20 byte node of revision being requested.""")
376 376
377 377 p1node = interfaceutil.Attribute(
378 378 """20 byte node of 1st parent of revision.""")
379 379
380 380 p2node = interfaceutil.Attribute(
381 381 """20 byte node of 2nd parent of revision.""")
382 382
383 383 linknode = interfaceutil.Attribute(
384 384 """20 byte node to store in ``linknode`` attribute.""")
385 385
386 386 basenode = interfaceutil.Attribute(
387 387 """Base revision that delta should be generated against.
388 388
389 389 If ``nullid``, the derived ``irevisiondelta`` should have its
390 390 ``revision`` field populated and no delta should be generated.
391 391
392 392 If ``None``, the delta may be generated against any revision that
393 393 is an ancestor of this revision. Or a full revision may be used.
394 394
395 395 If any other value, the delta should be produced against that
396 396 revision.
397 397 """)
398 398
399 399 ellipsis = interfaceutil.Attribute(
400 400 """Boolean on whether the ellipsis flag should be set.""")
401 401
402 402 class ifilerevisionssequence(interfaceutil.Interface):
403 403 """Contains index data for all revisions of a file.
404 404
405 405 Types implementing this behave like lists of tuples. The index
406 406 in the list corresponds to the revision number. The values contain
407 407 index metadata.
408 408
409 409 The *null* revision (revision number -1) is always the last item
410 410 in the index.
411 411 """
412 412
413 413 def __len__():
414 414 """The total number of revisions."""
415 415
416 416 def __getitem__(rev):
417 417 """Returns the object having a specific revision number.
418 418
419 419 Returns an 8-tuple with the following fields:
420 420
421 421 offset+flags
422 422 Contains the offset and flags for the revision. 64-bit unsigned
423 423 integer where first 6 bytes are the offset and the next 2 bytes
424 424 are flags. The offset can be 0 if it is not used by the store.
425 425 compressed size
426 426 Size of the revision data in the store. It can be 0 if it isn't
427 427 needed by the store.
428 428 uncompressed size
429 429 Fulltext size. It can be 0 if it isn't needed by the store.
430 430 base revision
431 431 Revision number of revision the delta for storage is encoded
432 432 against. -1 indicates not encoded against a base revision.
433 433 link revision
434 434 Revision number of changelog revision this entry is related to.
435 435 p1 revision
436 436 Revision number of 1st parent. -1 if no 1st parent.
437 437 p2 revision
438 438 Revision number of 2nd parent. -1 if no 2nd parent.
439 439 node
440 440 Binary node value for this revision number.
441 441
442 442 Negative values should index off the end of the sequence. ``-1``
443 443 should return the null revision. ``-2`` should return the most
444 444 recent revision.
445 445 """
446 446
447 447 def __contains__(rev):
448 448 """Whether a revision number exists."""
449 449
450 450 def insert(self, i, entry):
451 451 """Add an item to the index at specific revision."""
452 452
453 453 class ifileindex(interfaceutil.Interface):
454 454 """Storage interface for index data of a single file.
455 455
456 456 File storage data is divided into index metadata and data storage.
457 457 This interface defines the index portion of the interface.
458 458
459 459 The index logically consists of:
460 460
461 461 * A mapping between revision numbers and nodes.
462 462 * DAG data (storing and querying the relationship between nodes).
463 463 * Metadata to facilitate storage.
464 464 """
465 465 index = interfaceutil.Attribute(
466 466 """An ``ifilerevisionssequence`` instance.""")
467 467
468 468 def __len__():
469 469 """Obtain the number of revisions stored for this file."""
470 470
471 471 def __iter__():
472 472 """Iterate over revision numbers for this file."""
473 473
474 474 def revs(start=0, stop=None):
475 475 """Iterate over revision numbers for this file, with control."""
476 476
477 477 def parents(node):
478 478 """Returns a 2-tuple of parent nodes for a revision.
479 479
480 480 Values will be ``nullid`` if the parent is empty.
481 481 """
482 482
483 483 def parentrevs(rev):
484 484 """Like parents() but operates on revision numbers."""
485 485
486 486 def rev(node):
487 487 """Obtain the revision number given a node.
488 488
489 489 Raises ``error.LookupError`` if the node is not known.
490 490 """
491 491
492 492 def node(rev):
493 493 """Obtain the node value given a revision number.
494 494
495 495 Raises ``IndexError`` if the node is not known.
496 496 """
497 497
498 498 def lookup(node):
499 499 """Attempt to resolve a value to a node.
500 500
501 501 Value can be a binary node, hex node, revision number, or a string
502 502 that can be converted to an integer.
503 503
504 504 Raises ``error.LookupError`` if a node could not be resolved.
505 505 """
506 506
507 507 def linkrev(rev):
508 508 """Obtain the changeset revision number a revision is linked to."""
509 509
510 510 def flags(rev):
511 511 """Obtain flags used to affect storage of a revision."""
512 512
513 513 def iscensored(rev):
514 514 """Return whether a revision's content has been censored."""
515 515
516 516 def commonancestorsheads(node1, node2):
517 517 """Obtain an iterable of nodes containing heads of common ancestors.
518 518
519 519 See ``ancestor.commonancestorsheads()``.
520 520 """
521 521
522 522 def descendants(revs):
523 523 """Obtain descendant revision numbers for a set of revision numbers.
524 524
525 525 If ``nullrev`` is in the set, this is equivalent to ``revs()``.
526 526 """
527 527
528 528 def headrevs():
529 529 """Obtain a list of revision numbers that are DAG heads.
530 530
531 531 The list is sorted oldest to newest.
532 532
533 533 TODO determine if sorting is required.
534 534 """
535 535
536 536 def heads(start=None, stop=None):
537 537 """Obtain a list of nodes that are DAG heads, with control.
538 538
539 539 The set of revisions examined can be limited by specifying
540 540 ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
541 541 iterable of nodes. DAG traversal starts at earlier revision
542 542 ``start`` and iterates forward until any node in ``stop`` is
543 543 encountered.
544 544 """
545 545
546 546 def children(node):
547 547 """Obtain nodes that are children of a node.
548 548
549 549 Returns a list of nodes.
550 550 """
551 551
552 552 def deltaparent(rev):
553 553 """"Return the revision that is a suitable parent to delta against."""
554 554
class ifiledata(interfaceutil.Interface):
    """Storage interface for data storage of a specific file.

    This complements ``ifileindex`` and provides an interface for accessing
    data for a tracked file.
    """
    def rawsize(rev):
        """The size of the fulltext data for a revision as stored."""

    def size(rev):
        """Obtain the fulltext size of file data.

        Any metadata is excluded from size measurements. Use ``rawsize()`` if
        metadata size is important.
        """

    def checkhash(fulltext, node, p1=None, p2=None, rev=None):
        """Validate the stored hash of a given fulltext and node.

        Raises ``error.StorageError`` if hash validation fails.
        """

    def revision(node, raw=False):
        """Obtain fulltext data for a node.

        By default, any storage transformations are applied before the data
        is returned. If ``raw`` is True, non-raw storage transformations
        are not applied.

        The fulltext data may contain a header containing metadata. Most
        consumers should use ``read()`` to obtain the actual file data.
        """

    def read(node):
        """Resolve file fulltext data.

        This is similar to ``revision()`` except any metadata in the data
        headers is stripped.
        """

    def renamed(node):
        """Obtain copy metadata for a node.

        Returns ``False`` if no copy metadata is stored or a 2-tuple of
        (path, node) from which this revision was copied.
        """

    def cmp(node, fulltext):
        """Compare fulltext to another revision.

        Returns True if the fulltext is different from what is stored.

        This takes copy metadata into account.

        TODO better document the copy metadata and censoring logic.
        """

    def revdiff(rev1, rev2):
        """Obtain a delta between two revision numbers.

        Operates on raw data in the store (``revision(node, raw=True)``).

        The returned data is the result of ``bdiff.bdiff`` on the raw
        revision data.
        """

    def emitrevisiondeltas(requests):
        """Produce ``irevisiondelta`` from ``irevisiondeltarequest``s.

        Given an iterable of objects conforming to the ``irevisiondeltarequest``
        interface, emits objects conforming to the ``irevisiondelta``
        interface.

        This method is a generator.

        ``irevisiondelta`` should be emitted in the same order of
        ``irevisiondeltarequest`` that was passed in.

        The emitted objects MUST conform to the results of
        ``irevisiondeltarequest``. Namely, they must respect any requests
        for building a delta from a specific ``basenode`` if defined.

        When sending deltas, implementations must take into account whether
        the client has the base delta before encoding a delta against that
        revision. A revision encountered previously in ``requests`` is
        always a suitable base revision. An example of a bad delta is a delta
        against a non-ancestor revision. Another example of a bad delta is a
        delta against a censored revision.
        """
644 644
class ifilemutation(interfaceutil.Interface):
    """Storage interface for mutation events of a tracked file."""

    def add(filedata, meta, transaction, linkrev, p1, p2):
        """Record a new revision in the store.

        Accepts the file data, a metadata dictionary, a transaction, the
        linkrev, and the two parent nodes.

        Returns the node of the newly added revision.

        May be a no-op when a revision with matching data already exists.
        """

    def addrevision(revisiondata, transaction, linkrev, p1, p2, node=None,
                    flags=0, cachedelta=None):
        """Record a new revision in the store.

        Similar to ``add()`` but operating at a lower level.

        The supplied data already includes a metadata header, when present.

        ``node`` and ``flags`` allow callers to specify the expected node
        and the storage flags to apply.

        ``add()`` is typically invoked when adding files from e.g. the
        working directory, while ``addrevision()`` is commonly called by
        ``add()`` itself and in situations where revision data was already
        computed, such as when applying raw data from a peer repo.
        """

    def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
        """Store a series of deltas.

        ``deltas`` is an iterable of 7-tuples of
        (node, p1, p2, linknode, deltabase, delta, flags) describing the
        revisions to add.

        The ``delta`` member holds ``mpatch`` data to apply against a base
        revision named by ``deltabase``. When the base node is ``nullid``,
        the header from the delta can be ignored and the delta used as the
        fulltext.

        ``addrevisioncb`` should be invoked for each node as it is committed.

        Returns a list of the nodes that were processed, including nodes
        that already existed in the store.
        """

    def getstrippoint(minlink):
        """Find the minimum revision that must be stripped to strip a linkrev.

        Returns a 2-tuple of the minimum revision number and the set of all
        revision numbers that this strip would break.

        TODO this is highly revlog centric and should be abstracted into
        a higher-level deletion API. ``repair.strip()`` relies on this.
        """

    def strip(minlink, transaction):
        """Remove stored items starting at a linkrev.

        ``getstrippoint()`` determines the first node to remove, after which
        storage for all later revisions is effectively truncated.

        TODO this is highly revlog centric and should be abstracted into a
        higher-level deletion API.
        """
713 713
class ifilestorage(ifileindex, ifiledata, ifilemutation):
    """Complete storage interface for a single tracked file."""

    version = interfaceutil.Attribute(
        """Version number of storage.

        TODO this feels revlog centric and could likely be removed.
        """)

    _generaldelta = interfaceutil.Attribute(
        """Whether deltas can be against any parent revision.

        TODO this is used by changegroup code and it could probably be
        folded into another API.
        """)

    def files():
        """Return the paths backing storage for this file.

        TODO this is used heavily by verify code and there should probably
        be a better API for that.
        """

    def checksize():
        """Return the expected sizes of backing files.

        TODO this is used by verify and it should not be part of the interface.
        """
742 742
class idirs(interfaceutil.Interface):
    """Interface representing a collection of directories from paths.

    This interface is essentially a derived data structure representing
    directories from a collection of paths.
    """

    def addpath(path):
        """Record a path in the collection.

        Every directory making up the path is added to the collection.
        """

    def delpath(path):
        """Drop a path from the collection.

        When the path removed was the last one in a given directory, that
        directory is dropped from the collection as well.
        """

    def __iter__():
        """Iterate over the directories in this collection of paths."""

    def __contains__(path):
        """Whether a specific directory is in this collection."""
768 768
class imanifestdict(interfaceutil.Interface):
    """Interface representing a manifest data structure.

    A manifest is effectively a dict mapping paths to entries. Each entry
    consists of a binary node and extra flags affecting that entry.
    """

    def __getitem__(path):
        """Returns the binary node value for a path in the manifest.

        Raises ``KeyError`` if the path does not exist in the manifest.

        Equivalent to ``self.find(path)[0]``.
        """

    def find(path):
        """Returns the entry for a path in the manifest.

        Returns a 2-tuple of (node, flags).

        Raises ``KeyError`` if the path does not exist in the manifest.
        """

    def __len__():
        """Return the number of entries in the manifest."""

    def __nonzero__():
        """Returns True if the manifest has entries, False otherwise."""

    __bool__ = __nonzero__

    def __setitem__(path, node):
        """Define the node value for a path in the manifest.

        If the path is already in the manifest, its flags will be copied to
        the new entry.
        """

    def __contains__(path):
        """Whether a path exists in the manifest."""

    def __delitem__(path):
        """Remove a path from the manifest.

        Raises ``KeyError`` if the path is not in the manifest.
        """

    def __iter__():
        """Iterate over paths in the manifest."""

    def iterkeys():
        """Iterate over paths in the manifest."""

    def keys():
        """Obtain a list of paths in the manifest."""

    def filesnotin(other, match=None):
        """Obtain the set of paths in this manifest but not in another.

        ``match`` is an optional matcher function to be applied to both
        manifests.

        Returns a set of paths.
        """

    def dirs():
        """Returns an object implementing the ``idirs`` interface."""

    def hasdir(dir):
        """Returns a bool indicating if a directory is in this manifest."""

    def matches(match):
        """Generate a new manifest filtered through a matcher.

        Returns an object conforming to the ``imanifestdict`` interface.
        """

    def walk(match):
        """Generator of paths in manifest satisfying a matcher.

        This is equivalent to ``self.matches(match).iterkeys()`` except a new
        manifest object is not created.

        If the matcher has explicit files listed and they don't exist in
        the manifest, ``match.bad()`` is called for each missing file.
        """

    def diff(other, match=None, clean=False):
        """Find differences between this manifest and another.

        This manifest is compared to ``other``.

        If ``match`` is provided, the two manifests are filtered against this
        matcher and only entries satisfying the matcher are compared.

        If ``clean`` is True, unchanged files are included in the returned
        object.

        Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
        the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
        represents the node and flags for this manifest and ``(node2, flag2)``
        are the same for the other manifest.
        """

    def setflag(path, flag):
        """Set the flag value for a given path.

        Raises ``KeyError`` if the path is not already in the manifest.
        """

    def get(path, default=None):
        """Obtain the node value for a path or a default value if missing."""

    def flags(path, default=''):
        """Return the flags value for a path or a default value if missing."""

    def copy():
        """Return a copy of this manifest."""

    def items():
        """Returns an iterable of (path, node) for items in this manifest."""

    def iteritems():
        """Identical to items()."""

    def iterentries():
        """Returns an iterable of (path, node, flags) for this manifest.

        Similar to ``iteritems()`` except items are a 3-tuple and include
        flags.
        """

    def text():
        """Obtain the raw data representation for this manifest.

        Result is used to create a manifest revision.
        """

    def fastdelta(base, changes):
        """Obtain a delta between this manifest and another given changes.

        ``base`` is the raw data representation for another manifest.

        ``changes`` is an iterable of ``(path, to_delete)``.

        Returns a 2-tuple containing ``bytearray(self.text())`` and the
        delta between ``base`` and this manifest.
        """
917 917
class imanifestrevisionbase(interfaceutil.Interface):
    """Base interface representing a single revision of a manifest.

    Should not be used as a primary interface: should always be inherited
    as part of a larger interface.
    """

    def new():
        """Create a fresh manifest instance.

        The returned object conforms to the ``imanifestrevisionwritable``
        interface and is associated with the same ``imanifestlog``
        collection as this instance.
        """

    def copy():
        """Create a copy of this manifest instance.

        The returned object conforms to the ``imanifestrevisionwritable``
        interface and is associated with the same ``imanifestlog``
        collection as this instance.
        """

    def read():
        """Return the parsed manifest data structure.

        The returned object conforms to the ``imanifestdict`` interface.
        """
946 946
class imanifestrevisionstored(imanifestrevisionbase):
    """Interface representing a manifest revision committed to storage."""

    def node():
        """The binary node for this manifest."""

    parents = interfaceutil.Attribute(
        """List of binary nodes that are parents for this manifest revision."""
    )

    def readdelta(shallow=False):
        """Obtain the manifest data structure representing changes from parent.

        This manifest is compared to its 1st parent. A new manifest
        representing those differences is constructed.

        The returned object conforms to the ``imanifestdict`` interface.
        """

    def readfast(shallow=False):
        """Calls either ``read()`` or ``readdelta()``.

        The faster of the two options is called.
        """

    def find(key):
        """Calls ``self.read().find(key)``.

        Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
        """
977 977
class imanifestrevisionwritable(imanifestrevisionbase):
    """Interface representing a manifest revision that can be committed."""

    def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
        """Record this revision in storage.

        Accepts a transaction object, the changeset revision number it will
        be associated with, its parent nodes, and lists of added and
        removed paths.

        When ``match`` is supplied, storage may choose to skip inspecting
        or writing items that do not match. Storage must nevertheless
        remain able to provide the full manifest in the future for any
        directories written (these manifests should not be "narrowed on
        disk").

        Returns the binary node of the created revision.
        """
995 995
class imanifeststorage(interfaceutil.Interface):
    """Storage interface for manifest data."""

    tree = interfaceutil.Attribute(
        """The path to the directory this manifest tracks.

        The empty bytestring represents the root manifest.
        """)

    index = interfaceutil.Attribute(
        """An ``ifilerevisionssequence`` instance.""")

    indexfile = interfaceutil.Attribute(
        """Path of revlog index file.

        TODO this is revlog specific and should not be exposed.
        """)

    opener = interfaceutil.Attribute(
        """VFS opener to use to access underlying files used for storage.

        TODO this is revlog specific and should not be exposed.
        """)

    version = interfaceutil.Attribute(
        """Revlog version number.

        TODO this is revlog specific and should not be exposed.
        """)

    _generaldelta = interfaceutil.Attribute(
        """Whether generaldelta storage is being used.

        TODO this is revlog specific and should not be exposed.
        """)

    fulltextcache = interfaceutil.Attribute(
        """Dict with cache of fulltexts.

        TODO this doesn't feel appropriate for the storage interface.
        """)

    def __len__():
        """Obtain the number of revisions stored for this manifest."""

    def __iter__():
        """Iterate over revision numbers for this manifest."""

    def rev(node):
        """Obtain the revision number given a binary node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def node(rev):
        """Obtain the node value given a revision number.

        Raises ``error.LookupError`` if the revision is not known.
        """

    def lookup(value):
        """Attempt to resolve a value to a node.

        Value can be a binary node, hex node, revision number, or a bytes
        that can be converted to an integer.

        Raises ``error.LookupError`` if a node could not be resolved.

        TODO this is only used by debug* commands and can probably be deleted
        easily.
        """

    def parents(node):
        """Returns a 2-tuple of parent nodes for a node.

        Values will be ``nullid`` if the parent is empty.
        """

    def parentrevs(rev):
        """Like parents() but operates on revision numbers."""

    def linkrev(rev):
        """Obtain the changeset revision number a revision is linked to."""

    def revision(node, _df=None, raw=False):
        """Obtain fulltext data for a node."""

    def revdiff(rev1, rev2):
        """Obtain a delta between two revision numbers.

        The returned data is the result of ``bdiff.bdiff()`` on the raw
        revision data.
        """

    def cmp(node, fulltext):
        """Compare fulltext to another revision.

        Returns True if the fulltext is different from what is stored.
        """

    def emitrevisiondeltas(requests):
        """Produce ``irevisiondelta`` from ``irevisiondeltarequest``s.

        See the documentation for ``ifiledata`` for more.
        """

    def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
        """Process a series of deltas for storage.

        See the documentation in ``ifilemutation`` for more.
        """

    def getstrippoint(minlink):
        """Find minimum revision that must be stripped to strip a linkrev.

        See the documentation in ``ifilemutation`` for more.
        """

    def strip(minlink, transaction):
        """Remove storage of items starting at a linkrev.

        See the documentation in ``ifilemutation`` for more.
        """

    def checksize():
        """Obtain the expected sizes of backing files.

        TODO this is used by verify and it should not be part of the interface.
        """

    def files():
        """Obtain paths that are backing storage for this manifest.

        TODO this is used by verify and there should probably be a better API
        for this functionality.
        """

    def deltaparent(rev):
        """Obtain the revision that a revision is delta'd against.

        TODO delta encoding is an implementation detail of storage and should
        not be exposed to the storage interface.
        """

    def clone(tr, dest, **kwargs):
        """Clone this instance to another."""

    def clearcaches(clear_persisted_data=False):
        """Clear any caches associated with this instance."""

    def dirlog(d):
        """Obtain a manifest storage instance for a tree."""

    def add(m, transaction, link, p1, p2, added, removed, readtree=None,
            match=None):
        """Add a revision to storage.

        ``m`` is an object conforming to ``imanifestdict``.

        ``link`` is the linkrev revision number.

        ``p1`` and ``p2`` are the parent revision numbers.

        ``added`` and ``removed`` are iterables of added and removed paths,
        respectively.

        ``readtree`` is a function that can be used to read the child tree(s)
        when recursively writing the full tree structure when using
        treemanifests.

        ``match`` is a matcher that can be used to hint to storage that not all
        paths must be inspected; this is an optimization and can be safely
        ignored. Note that the storage must still be able to reproduce a full
        manifest including files that did not match.
        """
1171 1171
class imanifestlog(interfaceutil.Interface):
    """Interface representing a collection of manifest snapshots.

    Represents the root manifest in a repository.

    Also serves as a means to access nested tree manifests and to cache
    tree manifests.
    """

    def __getitem__(node):
        """Return a manifest instance for a given binary node.

        Equivalent to calling ``self.get('', node)``.

        The returned object conforms to the ``imanifestrevisionstored``
        interface.
        """

    def get(tree, node, verify=True):
        """Fetch the manifest instance for a given directory and binary node.

        ``node`` always names the node of the root manifest (which will be
        the only manifest when flat manifests are in use).

        When ``tree`` is the empty string the root manifest is returned;
        otherwise the manifest for the specified directory is returned
        (requires tree manifests).

        When ``verify`` is True, ``LookupError`` is raised if the node is
        not known.

        The returned object conforms to the ``imanifestrevisionstored``
        interface.
        """

    def getstorage(tree):
        """Fetch an interface to storage for a particular tree.

        When ``tree`` is the empty bytestring, storage for the root
        manifest is returned; otherwise storage for a tree manifest is
        returned.

        TODO formalize interface for returned object.
        """

    def clearcaches():
        """Clear caches associated with this collection."""

    def rev(node):
        """Return the revision number for a binary node.

        Raises ``error.LookupError`` if the node is not known.
        """
1224 1224
class ilocalrepositoryfilestorage(interfaceutil.Interface):
    """Local repository sub-interface providing access to tracked file storage.

    This interface defines how a repository accesses storage for a single
    tracked file path.
    """

    def file(f):
        """Return a filelog for a tracked path.

        The returned type conforms to the ``ifilestorage`` interface.
        """
1237 1237
1238 1238 class ilocalrepositorymain(interfaceutil.Interface):
1239 1239 """Main interface for local repositories.
1240 1240
1241 1241 This currently captures the reality of things - not how things should be.
1242 1242 """
1243 1243
1244 1244 supportedformats = interfaceutil.Attribute(
1245 1245 """Set of requirements that apply to stream clone.
1246 1246
1247 1247 This is actually a class attribute and is shared among all instances.
1248 1248 """)
1249 1249
1250 1250 supported = interfaceutil.Attribute(
1251 1251 """Set of requirements that this repo is capable of opening.""")
1252 1252
1253 1253 requirements = interfaceutil.Attribute(
1254 1254 """Set of requirements this repo uses.""")
1255 1255
1256 1256 filtername = interfaceutil.Attribute(
1257 1257 """Name of the repoview that is active on this repo.""")
1258 1258
1259 1259 wvfs = interfaceutil.Attribute(
1260 1260 """VFS used to access the working directory.""")
1261 1261
1262 1262 vfs = interfaceutil.Attribute(
1263 1263 """VFS rooted at the .hg directory.
1264 1264
1265 1265 Used to access repository data not in the store.
1266 1266 """)
1267 1267
1268 1268 svfs = interfaceutil.Attribute(
1269 1269 """VFS rooted at the store.
1270 1270
1271 1271 Used to access repository data in the store. Typically .hg/store.
1272 1272 But can point elsewhere if the store is shared.
1273 1273 """)
1274 1274
1275 1275 root = interfaceutil.Attribute(
1276 1276 """Path to the root of the working directory.""")
1277 1277
1278 1278 path = interfaceutil.Attribute(
1279 1279 """Path to the .hg directory.""")
1280 1280
1281 1281 origroot = interfaceutil.Attribute(
1282 1282 """The filesystem path that was used to construct the repo.""")
1283 1283
1284 1284 auditor = interfaceutil.Attribute(
1285 1285 """A pathauditor for the working directory.
1286 1286
1287 1287 This checks if a path refers to a nested repository.
1288 1288
1289 1289 Operates on the filesystem.
1290 1290 """)
1291 1291
1292 1292 nofsauditor = interfaceutil.Attribute(
1293 1293 """A pathauditor for the working directory.
1294 1294
1295 1295 This is like ``auditor`` except it doesn't do filesystem checks.
1296 1296 """)
1297 1297
1298 1298 baseui = interfaceutil.Attribute(
1299 1299 """Original ui instance passed into constructor.""")
1300 1300
1301 1301 ui = interfaceutil.Attribute(
1302 1302 """Main ui instance for this instance.""")
1303 1303
1304 1304 sharedpath = interfaceutil.Attribute(
1305 1305 """Path to the .hg directory of the repo this repo was shared from.""")
1306 1306
1307 1307 store = interfaceutil.Attribute(
1308 1308 """A store instance.""")
1309 1309
1310 1310 spath = interfaceutil.Attribute(
1311 1311 """Path to the store.""")
1312 1312
1313 1313 sjoin = interfaceutil.Attribute(
1314 1314 """Alias to self.store.join.""")
1315 1315
1316 1316 cachevfs = interfaceutil.Attribute(
1317 1317 """A VFS used to access the cache directory.
1318 1318
1319 1319 Typically .hg/cache.
1320 1320 """)
1321 1321
1322 1322 filteredrevcache = interfaceutil.Attribute(
1323 1323 """Holds sets of revisions to be filtered.""")
1324 1324
1325 1325 names = interfaceutil.Attribute(
1326 1326 """A ``namespaces`` instance.""")
1327 1327
1328 1328 def close():
1329 1329 """Close the handle on this repository."""
1330 1330
1331 1331 def peer():
1332 1332 """Obtain an object conforming to the ``peer`` interface."""
1333 1333
1334 1334 def unfiltered():
1335 1335 """Obtain an unfiltered/raw view of this repo."""
1336 1336
1337 1337 def filtered(name, visibilityexceptions=None):
1338 1338 """Obtain a named view of this repository."""
1339 1339
1340 1340 obsstore = interfaceutil.Attribute(
1341 1341 """A store of obsolescence data.""")
1342 1342
1343 1343 changelog = interfaceutil.Attribute(
1344 1344 """A handle on the changelog revlog.""")
1345 1345
1346 1346 manifestlog = interfaceutil.Attribute(
1347 1347 """An instance conforming to the ``imanifestlog`` interface.
1348 1348
1349 1349 Provides access to manifests for the repository.
1350 1350 """)
1351 1351
1352 1352 dirstate = interfaceutil.Attribute(
1353 1353 """Working directory state.""")
1354 1354
1355 1355 narrowpats = interfaceutil.Attribute(
1356 1356 """Matcher patterns for this repository's narrowspec.""")
1357 1357
1358 1358 def narrowmatch():
1359 1359 """Obtain a matcher for the narrowspec."""
1360 1360
1361 1361 def setnarrowpats(newincludes, newexcludes):
1362 1362 """Define the narrowspec for this repository."""
1363 1363
1364 1364 def __getitem__(changeid):
1365 1365 """Try to resolve a changectx."""
1366 1366
1367 1367 def __contains__(changeid):
1368 1368 """Whether a changeset exists."""
1369 1369
1370 1370 def __nonzero__():
1371 1371 """Always returns True."""
1372 1372 return True
1373 1373
1374 1374 __bool__ = __nonzero__
1375 1375
1376 1376 def __len__():
1377 1377 """Returns the number of changesets in the repo."""
1378 1378
1379 1379 def __iter__():
1380 1380 """Iterate over revisions in the changelog."""
1381 1381
1382 1382 def revs(expr, *args):
1383 1383 """Evaluate a revset.
1384 1384
1385 1385 Emits revisions.
1386 1386 """
1387 1387
1388 1388 def set(expr, *args):
1389 1389 """Evaluate a revset.
1390 1390
1391 1391 Emits changectx instances.
1392 1392 """
1393 1393
1394 1394 def anyrevs(specs, user=False, localalias=None):
1395 1395 """Find revisions matching one of the given revsets."""
1396 1396
1397 1397 def url():
1398 1398 """Returns a string representing the location of this repo."""
1399 1399
1400 1400 def hook(name, throw=False, **args):
1401 1401 """Call a hook."""
1402 1402
1403 1403 def tags():
1404 1404 """Return a mapping of tag to node."""
1405 1405
1406 1406 def tagtype(tagname):
1407 1407 """Return the type of a given tag."""
1408 1408
1409 1409 def tagslist():
1410 1410 """Return a list of tags ordered by revision."""
1411 1411
1412 1412 def nodetags(node):
1413 1413 """Return the tags associated with a node."""
1414 1414
1415 1415 def nodebookmarks(node):
1416 1416 """Return the list of bookmarks pointing to the specified node."""
1417 1417
1418 1418 def branchmap():
1419 1419 """Return a mapping of branch to heads in that branch."""
1420 1420
1421 1421 def revbranchcache():
1422 1422 pass
1423 1423
1424 1424 def branchtip(branchtip, ignoremissing=False):
1425 1425 """Return the tip node for a given branch."""
1426 1426
1427 1427 def lookup(key):
1428 1428 """Resolve the node for a revision."""
1429 1429
1430 1430 def lookupbranch(key):
1431 1431 """Look up the branch name of the given revision or branch name."""
1432 1432
1433 1433 def known(nodes):
1434 1434 """Determine whether a series of nodes is known.
1435 1435
1436 1436 Returns a list of bools.
1437 1437 """
1438 1438
    def local():
        """Whether the repository is local."""
        return True

    def publishing():
        """Whether the repository is a publishing repository."""

    def cancopy():
        # NOTE(review): undocumented interface member; semantics are defined
        # by the implementation -- confirm before documenting further.
        pass

    def shared():
        """The type of shared repository or None."""

    def wjoin(f, *insidef):
        """Calls self.vfs.reljoin(self.root, f, *insidef)"""
1454 1454
    # Working-directory helpers.
    def setparents(p1, p2):
        """Set the parent nodes of the working directory."""

    def filectx(path, changeid=None, fileid=None):
        """Obtain a filectx for the given file revision."""

    def getcwd():
        """Obtain the current working directory from the dirstate."""

    def pathto(f, cwd=None):
        """Obtain the relative path to a file."""

    def adddatafilter(name, fltr):
        # NOTE(review): undocumented interface member; semantics are defined
        # by the localrepo implementation -- confirm there.
        pass

    def wread(filename):
        """Read a file from wvfs, using data filters."""

    def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
        """Write data to a file in the wvfs, using data filters."""

    def wwritedata(filename, data):
        """Resolve data for writing to the wvfs, using data filters."""
1478 1478
    # Transaction management.
    def currenttransaction():
        """Obtain the current transaction instance or None."""

    def transaction(desc, report=None):
        """Open a new transaction to write to the repository."""

    def undofiles():
        """Returns a list of (vfs, path) for files to undo transactions."""

    def recover():
        """Roll back an interrupted transaction."""

    def rollback(dryrun=False, force=False):
        """Undo the last transaction.

        DANGEROUS.
        """

    def updatecaches(tr=None, full=False):
        """Warm repo caches."""
1499 1499
    # Cache invalidation and locking.
    def invalidatecaches():
        """Invalidate cached data due to the repository mutating."""

    def invalidatevolatilesets():
        # NOTE(review): undocumented; presumably drops volatile revset
        # caches -- confirm against the localrepo implementation.
        pass

    def invalidatedirstate():
        """Invalidate the dirstate."""

    def invalidate(clearfilecache=False):
        # NOTE(review): undocumented interface member -- confirm semantics.
        pass

    def invalidateall():
        # NOTE(review): undocumented interface member -- confirm semantics.
        pass

    def lock(wait=True):
        """Lock the repository store and return a lock instance."""

    def wlock(wait=True):
        """Lock the non-store parts of the repository."""

    def currentwlock():
        """Return the wlock if it's held or None."""
1523 1523
    # Commit and status members.
    def checkcommitpatterns(wctx, vdirs, match, status, fail):
        # NOTE(review): undocumented interface member -- confirm semantics.
        pass

    def commit(text='', user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to the repository."""

    def commitctx(ctx, error=False):
        """Commit a commitctx instance to the repository."""

    def destroying():
        """Inform the repository that nodes are about to be destroyed."""

    def destroyed():
        """Inform the repository that nodes have been destroyed."""

    def status(node1='.', node2=None, match=None, ignored=False,
               clean=False, unknown=False, listsubrepos=False):
        """Convenience method to call repo[x].status()."""

    def addpostdsstatus(ps):
        # NOTE(review): undocumented interface member -- confirm semantics.
        pass

    def postdsstatus():
        # NOTE(review): undocumented interface member -- confirm semantics.
        pass

    def clearpostdsstatus():
        # NOTE(review): undocumented interface member -- confirm semantics.
        pass
1552 1552
    def heads(start=None):
        """Obtain list of nodes that are DAG heads."""

    # NOTE(review): the remaining members are undocumented in this interface;
    # their semantics are defined by the localrepo implementation.
    def branchheads(branch=None, start=None, closed=False):
        pass

    def branches(nodes):
        pass

    def between(pairs):
        pass

    def checkpush(pushop):
        pass

    prepushoutgoinghooks = interfaceutil.Attribute(
        """util.hooks instance.""")

    def pushkey(namespace, key, old, new):
        pass

    def listkeys(namespace):
        pass

    def debugwireargs(one, two, three=None, four=None, five=None):
        pass

    def savecommitmessage(text):
        pass
1582 1582
# Marker interface combining the main-repository and file-storage contracts;
# it declares no members of its own.
class completelocalrepository(ilocalrepositorymain,
                              ilocalrepositoryfilestorage):
    """Complete interface for a local repository."""
@@ -1,984 +1,984
1 1 # storage.py - Testing of storage primitives.
2 2 #
3 3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import unittest
11 11
12 12 from ..node import (
13 13 hex,
14 14 nullid,
15 15 nullrev,
16 16 )
17 17 from .. import (
18 18 error,
19 19 mdiff,
20 20 revlog,
21 21 )
22 22
class basetestcase(unittest.TestCase):
    """TestCase with ``assertRaisesRegex`` available on all Pythons.

    Python 2's unittest only ships the old ``assertRaisesRegexp`` spelling;
    alias it under the modern name when the modern one is absent.
    """
    if not getattr(unittest.TestCase, r'assertRaisesRegex', False):
        # camelcase-required
        assertRaisesRegex = unittest.TestCase.assertRaisesRegexp
27 27
class revisiondeltarequest(object):
    """Simple attribute bag describing one requested revision delta."""

    def __init__(self, node, p1, p2, linknode, basenode, ellipsis):
        # Store everything verbatim; note p1/p2 are exposed under the
        # p1node/p2node attribute names.
        (self.node, self.p1node, self.p2node, self.linknode,
         self.basenode, self.ellipsis) = (
            node, p1, p2, linknode, basenode, ellipsis)
36 36
class ifileindextests(basetestcase):
    """Generic tests for the ifileindex interface.

    All file storage backends for index data should conform to the tests in this
    class.

    Use ``makeifileindextests()`` to create an instance of this type.
    """
    # NOTE(review): self._makefilefn / self._maketransactionfn are supplied by
    # the concrete subclass built by makeifileindextests() (not shown here).
    def testempty(self):
        # Exercises every index accessor against a store with zero revisions.
        f = self._makefilefn()
        self.assertEqual(len(f), 0, 'new file store has 0 length by default')
        self.assertEqual(list(f), [], 'iter yields nothing by default')

        gen = iter(f)
        with self.assertRaises(StopIteration):
            next(gen)

        # revs() should evaluate to an empty list.
        self.assertEqual(list(f.revs()), [])

        revs = iter(f.revs())
        with self.assertRaises(StopIteration):
            next(revs)

        self.assertEqual(list(f.revs(start=20)), [])

        # parents() and parentrevs() work with nullid/nullrev.
        self.assertEqual(f.parents(nullid), (nullid, nullid))
        self.assertEqual(f.parentrevs(nullrev), (nullrev, nullrev))

        with self.assertRaises(error.LookupError):
            f.parents(b'\x01' * 20)

        for i in range(-5, 5):
            if i == nullrev:
                continue

            with self.assertRaises(IndexError):
                f.parentrevs(i)

        # nullid/nullrev lookup always works.
        self.assertEqual(f.rev(nullid), nullrev)
        self.assertEqual(f.node(nullrev), nullid)

        with self.assertRaises(error.LookupError):
            f.rev(b'\x01' * 20)

        for i in range(-5, 5):
            if i == nullrev:
                continue

            with self.assertRaises(IndexError):
                f.node(i)

        self.assertEqual(f.lookup(nullid), nullid)
        self.assertEqual(f.lookup(nullrev), nullid)
        self.assertEqual(f.lookup(hex(nullid)), nullid)

        # String converted to integer doesn't work for nullrev.
        with self.assertRaises(error.LookupError):
            f.lookup(b'%d' % nullrev)

        self.assertEqual(f.linkrev(nullrev), nullrev)

        for i in range(-5, 5):
            if i == nullrev:
                continue

            with self.assertRaises(IndexError):
                f.linkrev(i)

        self.assertEqual(f.flags(nullrev), 0)

        for i in range(-5, 5):
            if i == nullrev:
                continue

            with self.assertRaises(IndexError):
                f.flags(i)

        self.assertFalse(f.iscensored(nullrev))

        for i in range(-5, 5):
            if i == nullrev:
                continue

            with self.assertRaises(IndexError):
                f.iscensored(i)

        self.assertEqual(list(f.commonancestorsheads(nullid, nullid)), [])

        with self.assertRaises(ValueError):
            self.assertEqual(list(f.descendants([])), [])

        self.assertEqual(list(f.descendants([nullrev])), [])

        self.assertEqual(f.headrevs(), [nullrev])
        self.assertEqual(f.heads(), [nullid])
        self.assertEqual(f.heads(nullid), [nullid])
        self.assertEqual(f.heads(None, [nullid]), [nullid])
        self.assertEqual(f.heads(nullid, [nullid]), [nullid])

        self.assertEqual(f.children(nullid), [])

        with self.assertRaises(error.LookupError):
            f.children(b'\x01' * 20)

        self.assertEqual(f.deltaparent(nullrev), nullrev)

        for i in range(-5, 5):
            if i == nullrev:
                continue

            with self.assertRaises(IndexError):
                f.deltaparent(i)

    def testsinglerevision(self):
        # A store with exactly one revision (rev 0, both parents null).
        f = self._makefilefn()
        with self._maketransactionfn() as tr:
            node = f.add(b'initial', None, tr, 0, nullid, nullid)

        self.assertEqual(len(f), 1)
        self.assertEqual(list(f), [0])

        gen = iter(f)
        self.assertEqual(next(gen), 0)

        with self.assertRaises(StopIteration):
            next(gen)

        self.assertEqual(list(f.revs()), [0])
        self.assertEqual(list(f.revs(start=1)), [])
        self.assertEqual(list(f.revs(start=0)), [0])
        self.assertEqual(list(f.revs(stop=0)), [0])
        self.assertEqual(list(f.revs(stop=1)), [0])
        self.assertEqual(list(f.revs(1, 1)), [])
        # TODO buggy
        self.assertEqual(list(f.revs(1, 0)), [1, 0])
        self.assertEqual(list(f.revs(2, 0)), [2, 1, 0])

        self.assertEqual(f.parents(node), (nullid, nullid))
        self.assertEqual(f.parentrevs(0), (nullrev, nullrev))

        with self.assertRaises(error.LookupError):
            f.parents(b'\x01' * 20)

        with self.assertRaises(IndexError):
            f.parentrevs(1)

        self.assertEqual(f.rev(node), 0)

        with self.assertRaises(error.LookupError):
            f.rev(b'\x01' * 20)

        self.assertEqual(f.node(0), node)

        with self.assertRaises(IndexError):
            f.node(1)

        # lookup() accepts nodes, rev numbers, rev-number bytestrings and hex.
        self.assertEqual(f.lookup(node), node)
        self.assertEqual(f.lookup(0), node)
        self.assertEqual(f.lookup(b'0'), node)
        self.assertEqual(f.lookup(hex(node)), node)

        self.assertEqual(f.linkrev(0), 0)

        with self.assertRaises(IndexError):
            f.linkrev(1)

        self.assertEqual(f.flags(0), 0)

        with self.assertRaises(IndexError):
            f.flags(1)

        self.assertFalse(f.iscensored(0))

        with self.assertRaises(IndexError):
            f.iscensored(1)

        self.assertEqual(list(f.descendants([0])), [])

        self.assertEqual(f.headrevs(), [0])

        self.assertEqual(f.heads(), [node])
        self.assertEqual(f.heads(node), [node])
        self.assertEqual(f.heads(stop=[node]), [node])

        with self.assertRaises(error.LookupError):
            f.heads(stop=[b'\x01' * 20])

        self.assertEqual(f.children(node), [])

        self.assertEqual(f.deltaparent(0), nullrev)

    def testmultiplerevisions(self):
        # A linear chain of three revisions; linkrevs intentionally skip 2.
        fulltext0 = b'x' * 1024
        fulltext1 = fulltext0 + b'y'
        fulltext2 = b'y' + fulltext0 + b'z'

        f = self._makefilefn()
        with self._maketransactionfn() as tr:
            node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
            node1 = f.add(fulltext1, None, tr, 1, node0, nullid)
            node2 = f.add(fulltext2, None, tr, 3, node1, nullid)

        self.assertEqual(len(f), 3)
        self.assertEqual(list(f), [0, 1, 2])

        gen = iter(f)
        self.assertEqual(next(gen), 0)
        self.assertEqual(next(gen), 1)
        self.assertEqual(next(gen), 2)

        with self.assertRaises(StopIteration):
            next(gen)

        self.assertEqual(list(f.revs()), [0, 1, 2])
        self.assertEqual(list(f.revs(0)), [0, 1, 2])
        self.assertEqual(list(f.revs(1)), [1, 2])
        self.assertEqual(list(f.revs(2)), [2])
        self.assertEqual(list(f.revs(3)), [])
        self.assertEqual(list(f.revs(stop=1)), [0, 1])
        self.assertEqual(list(f.revs(stop=2)), [0, 1, 2])
        self.assertEqual(list(f.revs(stop=3)), [0, 1, 2])
        self.assertEqual(list(f.revs(2, 0)), [2, 1, 0])
        self.assertEqual(list(f.revs(2, 1)), [2, 1])
        # TODO this is wrong
        self.assertEqual(list(f.revs(3, 2)), [3, 2])

        self.assertEqual(f.parents(node0), (nullid, nullid))
        self.assertEqual(f.parents(node1), (node0, nullid))
        self.assertEqual(f.parents(node2), (node1, nullid))

        self.assertEqual(f.parentrevs(0), (nullrev, nullrev))
        self.assertEqual(f.parentrevs(1), (0, nullrev))
        self.assertEqual(f.parentrevs(2), (1, nullrev))

        self.assertEqual(f.rev(node0), 0)
        self.assertEqual(f.rev(node1), 1)
        self.assertEqual(f.rev(node2), 2)

        with self.assertRaises(error.LookupError):
            f.rev(b'\x01' * 20)

        self.assertEqual(f.node(0), node0)
        self.assertEqual(f.node(1), node1)
        self.assertEqual(f.node(2), node2)

        with self.assertRaises(IndexError):
            f.node(3)

        self.assertEqual(f.lookup(node0), node0)
        self.assertEqual(f.lookup(0), node0)
        self.assertEqual(f.lookup(b'0'), node0)
        self.assertEqual(f.lookup(hex(node0)), node0)

        self.assertEqual(f.lookup(node1), node1)
        self.assertEqual(f.lookup(1), node1)
        self.assertEqual(f.lookup(b'1'), node1)
        self.assertEqual(f.lookup(hex(node1)), node1)

        self.assertEqual(f.linkrev(0), 0)
        self.assertEqual(f.linkrev(1), 1)
        self.assertEqual(f.linkrev(2), 3)

        with self.assertRaises(IndexError):
            f.linkrev(3)

        self.assertEqual(f.flags(0), 0)
        self.assertEqual(f.flags(1), 0)
        self.assertEqual(f.flags(2), 0)

        with self.assertRaises(IndexError):
            f.flags(3)

        self.assertFalse(f.iscensored(0))
        self.assertFalse(f.iscensored(1))
        self.assertFalse(f.iscensored(2))

        with self.assertRaises(IndexError):
            f.iscensored(3)

        self.assertEqual(f.commonancestorsheads(node1, nullid), [])
        self.assertEqual(f.commonancestorsheads(node1, node0), [node0])
        self.assertEqual(f.commonancestorsheads(node1, node1), [node1])
        self.assertEqual(f.commonancestorsheads(node0, node1), [node0])
        self.assertEqual(f.commonancestorsheads(node1, node2), [node1])
        self.assertEqual(f.commonancestorsheads(node2, node1), [node1])

        self.assertEqual(list(f.descendants([0])), [1, 2])
        self.assertEqual(list(f.descendants([1])), [2])
        self.assertEqual(list(f.descendants([0, 1])), [1, 2])

        self.assertEqual(f.headrevs(), [2])

        self.assertEqual(f.heads(), [node2])
        self.assertEqual(f.heads(node0), [node2])
        self.assertEqual(f.heads(node1), [node2])
        self.assertEqual(f.heads(node2), [node2])

        # TODO this behavior seems wonky. Is it correct? If so, the
        # docstring for heads() should be updated to reflect desired
        # behavior.
        self.assertEqual(f.heads(stop=[node1]), [node1, node2])
        self.assertEqual(f.heads(stop=[node0]), [node0, node2])
        self.assertEqual(f.heads(stop=[node1, node2]), [node1, node2])

        with self.assertRaises(error.LookupError):
            f.heads(stop=[b'\x01' * 20])

        self.assertEqual(f.children(node0), [node1])
        self.assertEqual(f.children(node1), [node2])
        self.assertEqual(f.children(node2), [])

        self.assertEqual(f.deltaparent(0), nullrev)
        self.assertEqual(f.deltaparent(1), 0)
        self.assertEqual(f.deltaparent(2), 1)

    def testmultipleheads(self):
        # A DAG with three branches off rev 0, producing heads 2, 4 and 5.
        f = self._makefilefn()

        with self._maketransactionfn() as tr:
            node0 = f.add(b'0', None, tr, 0, nullid, nullid)
            node1 = f.add(b'1', None, tr, 1, node0, nullid)
            node2 = f.add(b'2', None, tr, 2, node1, nullid)
            node3 = f.add(b'3', None, tr, 3, node0, nullid)
            node4 = f.add(b'4', None, tr, 4, node3, nullid)
            node5 = f.add(b'5', None, tr, 5, node0, nullid)

        self.assertEqual(len(f), 6)

        self.assertEqual(list(f.descendants([0])), [1, 2, 3, 4, 5])
        self.assertEqual(list(f.descendants([1])), [2])
        self.assertEqual(list(f.descendants([2])), [])
        self.assertEqual(list(f.descendants([3])), [4])
        self.assertEqual(list(f.descendants([0, 1])), [1, 2, 3, 4, 5])
        self.assertEqual(list(f.descendants([1, 3])), [2, 4])

        self.assertEqual(f.headrevs(), [2, 4, 5])

        self.assertEqual(f.heads(), [node2, node4, node5])
        self.assertEqual(f.heads(node0), [node2, node4, node5])
        self.assertEqual(f.heads(node1), [node2])
        self.assertEqual(f.heads(node2), [node2])
        self.assertEqual(f.heads(node3), [node4])
        self.assertEqual(f.heads(node4), [node4])
        self.assertEqual(f.heads(node5), [node5])

        # TODO this seems wrong.
        self.assertEqual(f.heads(stop=[node0]), [node0, node2, node4, node5])
        self.assertEqual(f.heads(stop=[node1]), [node1, node2, node4, node5])

        self.assertEqual(f.children(node0), [node1, node3, node5])
        self.assertEqual(f.children(node1), [node2])
        self.assertEqual(f.children(node2), [])
        self.assertEqual(f.children(node3), [node4])
        self.assertEqual(f.children(node4), [])
        self.assertEqual(f.children(node5), [])
395 395
class ifiledatatests(basetestcase):
    """Generic tests for the ifiledata interface.

    All file storage backends for data should conform to the tests in this
    class.

    Use ``makeifiledatatests()`` to create an instance of this type.
    """
    # Hash-check failures are expected to raise error.StorageError, the
    # storage-layer base exception (RevlogError subclasses it), so non-revlog
    # backends can comply without raising a revlog-specific type.
    def testempty(self):
        f = self._makefilefn()

        self.assertEqual(f.rawsize(nullrev), 0)

        for i in range(-5, 5):
            if i == nullrev:
                continue

            with self.assertRaises(IndexError):
                f.rawsize(i)

        self.assertEqual(f.size(nullrev), 0)

        for i in range(-5, 5):
            if i == nullrev:
                continue

            with self.assertRaises(IndexError):
                f.size(i)

        with self.assertRaises(error.StorageError):
            f.checkhash(b'', nullid)

        with self.assertRaises(error.LookupError):
            f.checkhash(b'', b'\x01' * 20)

        self.assertEqual(f.revision(nullid), b'')
        self.assertEqual(f.revision(nullid, raw=True), b'')

        with self.assertRaises(error.LookupError):
            f.revision(b'\x01' * 20)

        self.assertEqual(f.read(nullid), b'')

        with self.assertRaises(error.LookupError):
            f.read(b'\x01' * 20)

        self.assertFalse(f.renamed(nullid))

        with self.assertRaises(error.LookupError):
            f.read(b'\x01' * 20)

        self.assertTrue(f.cmp(nullid, b''))
        self.assertTrue(f.cmp(nullid, b'foo'))

        with self.assertRaises(error.LookupError):
            f.cmp(b'\x01' * 20, b'irrelevant')

        self.assertEqual(f.revdiff(nullrev, nullrev), b'')

        with self.assertRaises(IndexError):
            f.revdiff(0, nullrev)

        with self.assertRaises(IndexError):
            f.revdiff(nullrev, 0)

        with self.assertRaises(IndexError):
            f.revdiff(0, 0)

        gen = f.emitrevisiondeltas([])
        with self.assertRaises(StopIteration):
            next(gen)

        requests = [
            revisiondeltarequest(nullid, nullid, nullid, nullid, nullid, False),
        ]
        gen = f.emitrevisiondeltas(requests)

        delta = next(gen)

        self.assertEqual(delta.node, nullid)
        self.assertEqual(delta.p1node, nullid)
        self.assertEqual(delta.p2node, nullid)
        self.assertEqual(delta.linknode, nullid)
        self.assertEqual(delta.basenode, nullid)
        self.assertIsNone(delta.baserevisionsize)
        self.assertEqual(delta.revision, b'')
        self.assertIsNone(delta.delta)

        with self.assertRaises(StopIteration):
            next(gen)

        requests = [
            revisiondeltarequest(nullid, nullid, nullid, nullid, nullid, False),
            revisiondeltarequest(nullid, b'\x01' * 20, b'\x02' * 20,
                                 b'\x03' * 20, nullid, False)
        ]

        gen = f.emitrevisiondeltas(requests)

        next(gen)
        delta = next(gen)

        self.assertEqual(delta.node, nullid)
        self.assertEqual(delta.p1node, b'\x01' * 20)
        self.assertEqual(delta.p2node, b'\x02' * 20)
        self.assertEqual(delta.linknode, b'\x03' * 20)
        self.assertEqual(delta.basenode, nullid)
        self.assertIsNone(delta.baserevisionsize)
        self.assertEqual(delta.revision, b'')
        self.assertIsNone(delta.delta)

        with self.assertRaises(StopIteration):
            next(gen)

    def testsinglerevision(self):
        fulltext = b'initial'

        f = self._makefilefn()
        with self._maketransactionfn() as tr:
            node = f.add(fulltext, None, tr, 0, nullid, nullid)

        self.assertEqual(f.rawsize(0), len(fulltext))

        with self.assertRaises(IndexError):
            f.rawsize(1)

        self.assertEqual(f.size(0), len(fulltext))

        with self.assertRaises(IndexError):
            f.size(1)

        f.checkhash(fulltext, node)
        f.checkhash(fulltext, node, nullid, nullid)

        with self.assertRaises(error.StorageError):
            f.checkhash(fulltext + b'extra', node)

        with self.assertRaises(error.StorageError):
            f.checkhash(fulltext, node, b'\x01' * 20, nullid)

        with self.assertRaises(error.StorageError):
            f.checkhash(fulltext, node, nullid, b'\x01' * 20)

        self.assertEqual(f.revision(node), fulltext)
        self.assertEqual(f.revision(node, raw=True), fulltext)

        self.assertEqual(f.read(node), fulltext)

        self.assertFalse(f.renamed(node))

        self.assertFalse(f.cmp(node, fulltext))
        self.assertTrue(f.cmp(node, fulltext + b'extra'))

        self.assertEqual(f.revdiff(0, 0), b'')
        self.assertEqual(f.revdiff(nullrev, 0),
                         b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07%s' %
                         fulltext)

        self.assertEqual(f.revdiff(0, nullrev),
                         b'\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00')

        requests = [
            revisiondeltarequest(node, nullid, nullid, nullid, nullid, False),
        ]
        gen = f.emitrevisiondeltas(requests)

        delta = next(gen)

        self.assertEqual(delta.node, node)
        self.assertEqual(delta.p1node, nullid)
        self.assertEqual(delta.p2node, nullid)
        self.assertEqual(delta.linknode, nullid)
        self.assertEqual(delta.basenode, nullid)
        self.assertIsNone(delta.baserevisionsize)
        self.assertEqual(delta.revision, fulltext)
        self.assertIsNone(delta.delta)

        with self.assertRaises(StopIteration):
            next(gen)

    def testmultiplerevisions(self):
        fulltext0 = b'x' * 1024
        fulltext1 = fulltext0 + b'y'
        fulltext2 = b'y' + fulltext0 + b'z'

        f = self._makefilefn()
        with self._maketransactionfn() as tr:
            node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
            node1 = f.add(fulltext1, None, tr, 1, node0, nullid)
            node2 = f.add(fulltext2, None, tr, 3, node1, nullid)

        self.assertEqual(f.rawsize(0), len(fulltext0))
        self.assertEqual(f.rawsize(1), len(fulltext1))
        self.assertEqual(f.rawsize(2), len(fulltext2))

        with self.assertRaises(IndexError):
            f.rawsize(3)

        self.assertEqual(f.size(0), len(fulltext0))
        self.assertEqual(f.size(1), len(fulltext1))
        self.assertEqual(f.size(2), len(fulltext2))

        with self.assertRaises(IndexError):
            f.size(3)

        f.checkhash(fulltext0, node0)
        f.checkhash(fulltext1, node1)
        f.checkhash(fulltext1, node1, node0, nullid)
        f.checkhash(fulltext2, node2, node1, nullid)

        with self.assertRaises(error.StorageError):
            f.checkhash(fulltext1, b'\x01' * 20)

        with self.assertRaises(error.StorageError):
            f.checkhash(fulltext1 + b'extra', node1, node0, nullid)

        with self.assertRaises(error.StorageError):
            f.checkhash(fulltext1, node1, node0, node0)

        self.assertEqual(f.revision(node0), fulltext0)
        self.assertEqual(f.revision(node0, raw=True), fulltext0)
        self.assertEqual(f.revision(node1), fulltext1)
        self.assertEqual(f.revision(node1, raw=True), fulltext1)
        self.assertEqual(f.revision(node2), fulltext2)
        self.assertEqual(f.revision(node2, raw=True), fulltext2)

        with self.assertRaises(error.LookupError):
            f.revision(b'\x01' * 20)

        self.assertEqual(f.read(node0), fulltext0)
        self.assertEqual(f.read(node1), fulltext1)
        self.assertEqual(f.read(node2), fulltext2)

        with self.assertRaises(error.LookupError):
            f.read(b'\x01' * 20)

        self.assertFalse(f.renamed(node0))
        self.assertFalse(f.renamed(node1))
        self.assertFalse(f.renamed(node2))

        with self.assertRaises(error.LookupError):
            f.renamed(b'\x01' * 20)

        self.assertFalse(f.cmp(node0, fulltext0))
        self.assertFalse(f.cmp(node1, fulltext1))
        self.assertFalse(f.cmp(node2, fulltext2))

        self.assertTrue(f.cmp(node1, fulltext0))
        self.assertTrue(f.cmp(node2, fulltext1))

        with self.assertRaises(error.LookupError):
            f.cmp(b'\x01' * 20, b'irrelevant')

        self.assertEqual(f.revdiff(0, 1),
                         b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' +
                         fulltext1)

        self.assertEqual(f.revdiff(0, 2),
                         b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x02' +
                         fulltext2)

        requests = [
            revisiondeltarequest(node0, nullid, nullid, b'\x01' * 20, nullid,
                                 False),
            revisiondeltarequest(node1, node0, nullid, b'\x02' * 20, node0,
                                 False),
            revisiondeltarequest(node2, node1, nullid, b'\x03' * 20, node1,
                                 False),
        ]
        gen = f.emitrevisiondeltas(requests)

        delta = next(gen)

        self.assertEqual(delta.node, node0)
        self.assertEqual(delta.p1node, nullid)
        self.assertEqual(delta.p2node, nullid)
        self.assertEqual(delta.linknode, b'\x01' * 20)
        self.assertEqual(delta.basenode, nullid)
        self.assertIsNone(delta.baserevisionsize)
        self.assertEqual(delta.revision, fulltext0)
        self.assertIsNone(delta.delta)

        delta = next(gen)

        self.assertEqual(delta.node, node1)
        self.assertEqual(delta.p1node, node0)
        self.assertEqual(delta.p2node, nullid)
        self.assertEqual(delta.linknode, b'\x02' * 20)
        self.assertEqual(delta.basenode, node0)
        self.assertIsNone(delta.baserevisionsize)
        self.assertIsNone(delta.revision)
        self.assertEqual(delta.delta,
                         b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' +
                         fulltext1)

        delta = next(gen)

        self.assertEqual(delta.node, node2)
        self.assertEqual(delta.p1node, node1)
        self.assertEqual(delta.p2node, nullid)
        self.assertEqual(delta.linknode, b'\x03' * 20)
        self.assertEqual(delta.basenode, node1)
        self.assertIsNone(delta.baserevisionsize)
        self.assertIsNone(delta.revision)
        self.assertEqual(delta.delta,
                         b'\x00\x00\x00\x00\x00\x00\x04\x01\x00\x00\x04\x02' +
                         fulltext2)

        with self.assertRaises(StopIteration):
            next(gen)

    def testrenamed(self):
        fulltext0 = b'foo'
        fulltext1 = b'bar'
        fulltext2 = b'baz'

        meta1 = {
            b'copy': b'source0',
            b'copyrev': b'a' * 40,
        }

        meta2 = {
            b'copy': b'source1',
            b'copyrev': b'b' * 40,
        }

        stored1 = b''.join([
            b'\x01\ncopy: source0\n',
            b'copyrev: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n\x01\n',
            fulltext1,
        ])

        stored2 = b''.join([
            b'\x01\ncopy: source1\n',
            b'copyrev: bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n\x01\n',
            fulltext2,
        ])

        f = self._makefilefn()
        with self._maketransactionfn() as tr:
            node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
            node1 = f.add(fulltext1, meta1, tr, 1, node0, nullid)
            node2 = f.add(fulltext2, meta2, tr, 2, nullid, nullid)

        self.assertEqual(f.rawsize(1), len(stored1))
        self.assertEqual(f.rawsize(2), len(stored2))

        # Metadata header isn't recognized when parent isn't nullid.
        self.assertEqual(f.size(1), len(stored1))
        self.assertEqual(f.size(2), len(fulltext2))

        self.assertEqual(f.revision(node1), stored1)
        self.assertEqual(f.revision(node1, raw=True), stored1)
        self.assertEqual(f.revision(node2), stored2)
        self.assertEqual(f.revision(node2, raw=True), stored2)

        self.assertEqual(f.read(node1), fulltext1)
        self.assertEqual(f.read(node2), fulltext2)

        # Returns False when first parent is set.
        self.assertFalse(f.renamed(node1))
        self.assertEqual(f.renamed(node2), (b'source1', b'\xbb' * 20))

        self.assertTrue(f.cmp(node1, fulltext1))
        self.assertTrue(f.cmp(node1, stored1))
        self.assertFalse(f.cmp(node2, fulltext2))
        self.assertTrue(f.cmp(node2, stored2))

    def testmetadataprefix(self):
        # Content with metadata prefix has extra prefix inserted in storage.
        fulltext0 = b'\x01\nfoo'
        stored0 = b'\x01\n\x01\n\x01\nfoo'

        fulltext1 = b'\x01\nbar'
        meta1 = {
            b'copy': b'source0',
            b'copyrev': b'b' * 40,
        }
        stored1 = b''.join([
            b'\x01\ncopy: source0\n',
            b'copyrev: %s\n' % (b'b' * 40),
            b'\x01\n\x01\nbar',
        ])

        f = self._makefilefn()
        with self._maketransactionfn() as tr:
            node0 = f.add(fulltext0, {}, tr, 0, nullid, nullid)
            node1 = f.add(fulltext1, meta1, tr, 1, nullid, nullid)

        self.assertEqual(f.rawsize(0), len(stored0))
        self.assertEqual(f.rawsize(1), len(stored1))

        # TODO this is buggy.
        self.assertEqual(f.size(0), len(fulltext0) + 4)

        self.assertEqual(f.size(1), len(fulltext1))

        self.assertEqual(f.revision(node0), stored0)
        self.assertEqual(f.revision(node0, raw=True), stored0)

        self.assertEqual(f.revision(node1), stored1)
        self.assertEqual(f.revision(node1, raw=True), stored1)

        self.assertEqual(f.read(node0), fulltext0)
        self.assertEqual(f.read(node1), fulltext1)

        self.assertFalse(f.cmp(node0, fulltext0))
        self.assertTrue(f.cmp(node0, stored0))

        self.assertFalse(f.cmp(node1, fulltext1))
        self.assertTrue(f.cmp(node1, stored0))

    def testcensored(self):
        f = self._makefilefn()

        stored1 = revlog.packmeta({
            b'censored': b'tombstone',
        }, b'')

        # TODO tests are incomplete because we need the node to be
        # different due to presence of censor metadata. But we can't
        # do this with addrevision().
        with self._maketransactionfn() as tr:
            node0 = f.add(b'foo', None, tr, 0, nullid, nullid)
            f.addrevision(stored1, tr, 1, node0, nullid,
                          flags=revlog.REVIDX_ISCENSORED)

        self.assertEqual(f.flags(1), revlog.REVIDX_ISCENSORED)
        self.assertTrue(f.iscensored(1))

        self.assertEqual(f.revision(1), stored1)
        self.assertEqual(f.revision(1, raw=True), stored1)

        self.assertEqual(f.read(1), b'')
class ifilemutationtests(basetestcase):
    """Generic tests for the ifilemutation interface.

    All file storage backends that support writing should conform to this
    interface.

    Use ``makeifilemutationtests()`` to create an instance of this type.
    """
    def testaddnoop(self):
        # Adding identical content repeatedly is a no-op: the same node
        # comes back and only one revision is stored.
        f = self._makefilefn()
        with self._maketransactionfn() as tr:
            node0 = f.add(b'foo', None, tr, 0, nullid, nullid)
            node1 = f.add(b'foo', None, tr, 0, nullid, nullid)
            # Varying by linkrev shouldn't impact hash.
            node2 = f.add(b'foo', None, tr, 1, nullid, nullid)

        self.assertEqual(node1, node0)
        self.assertEqual(node2, node0)
        self.assertEqual(len(f), 1)

    def testaddrevisionbadnode(self):
        f = self._makefilefn()
        with self._maketransactionfn() as tr:
            # Adding a revision with bad node value fails.
            # (StorageError is the storage-agnostic base of RevlogError.)
            with self.assertRaises(error.StorageError):
                f.addrevision(b'foo', tr, 0, nullid, nullid, node=b'\x01' * 20)

    def testaddrevisionunknownflag(self):
        f = self._makefilefn()
        with self._maketransactionfn() as tr:
            # Pick a flag bit that is not in REVIDX_KNOWN_FLAGS.
            for i in range(15, 0, -1):
                if (1 << i) & ~revlog.REVIDX_KNOWN_FLAGS:
                    flags = 1 << i
                    break

            with self.assertRaises(error.StorageError):
                f.addrevision(b'foo', tr, 0, nullid, nullid, flags=flags)

    def testaddgroupsimple(self):
        f = self._makefilefn()

        callbackargs = []
        def cb(*args, **kwargs):
            callbackargs.append((args, kwargs))

        def linkmapper(node):
            return 0

        # An empty delta group is accepted and adds nothing.
        with self._maketransactionfn() as tr:
            nodes = f.addgroup([], None, tr, addrevisioncb=cb)

        self.assertEqual(nodes, [])
        self.assertEqual(callbackargs, [])
        self.assertEqual(len(f), 0)

        fulltext0 = b'foo'
        delta0 = mdiff.trivialdiffheader(len(fulltext0)) + fulltext0

        # A delta whose advertised node doesn't match its content is
        # rejected.
        deltas = [
            (b'\x01' * 20, nullid, nullid, nullid, nullid, delta0, 0),
        ]

        with self._maketransactionfn() as tr:
            with self.assertRaises(error.StorageError):
                f.addgroup(deltas, linkmapper, tr, addrevisioncb=cb)

            node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)

        f = self._makefilefn()

        # With the correct node, the same delta is accepted.
        deltas = [
            (node0, nullid, nullid, nullid, nullid, delta0, 0),
        ]

        with self._maketransactionfn() as tr:
            nodes = f.addgroup(deltas, linkmapper, tr, addrevisioncb=cb)

        self.assertEqual(nodes, [
            b'\x49\xd8\xcb\xb1\x5c\xe2\x57\x92\x04\x47'
            b'\x00\x6b\x46\x97\x8b\x7a\xf9\x80\xa9\x79'])

        # The add-revision callback fired once, with the new node as the
        # second positional argument.
        self.assertEqual(len(callbackargs), 1)
        self.assertEqual(callbackargs[0][0][1], nodes[0])

        self.assertEqual(list(f.revs()), [0])
        self.assertEqual(f.rev(nodes[0]), 0)
        self.assertEqual(f.node(0), nodes[0])

    def testaddgroupmultiple(self):
        # Revisions added individually to one store can be replayed as a
        # delta group into a fresh store, producing identical nodes.
        f = self._makefilefn()

        fulltexts = [
            b'foo',
            b'bar',
            b'x' * 1024,
        ]

        nodes = []
        with self._maketransactionfn() as tr:
            for fulltext in fulltexts:
                nodes.append(f.add(fulltext, None, tr, 0, nullid, nullid))

        f = self._makefilefn()
        deltas = []
        for i, fulltext in enumerate(fulltexts):
            delta = mdiff.trivialdiffheader(len(fulltext)) + fulltext

            deltas.append((nodes[i], nullid, nullid, nullid, nullid, delta, 0))

        with self._maketransactionfn() as tr:
            self.assertEqual(f.addgroup(deltas, lambda x: 0, tr), nodes)

        self.assertEqual(len(f), len(deltas))
        self.assertEqual(list(f.revs()), [0, 1, 2])
        self.assertEqual(f.rev(nodes[0]), 0)
        self.assertEqual(f.rev(nodes[1]), 1)
        self.assertEqual(f.rev(nodes[2]), 2)
        self.assertEqual(f.node(0), nodes[0])
        self.assertEqual(f.node(1), nodes[1])
        self.assertEqual(f.node(2), nodes[2])
def makeifileindextests(makefilefn, maketransactionfn):
    """Create a unittest.TestCase class suitable for testing file storage.

    ``makefilefn`` is a callable which receives the test case as an
    argument and returns an object implementing the ``ifilestorage`` interface.

    ``maketransactionfn`` is a callable which receives the test case as an
    argument and returns a transaction object.

    Returns a type that is a ``unittest.TestCase`` that can be used for
    testing the object implementing the file storage interface. Simply
    assign the returned value to a module-level attribute and a test loader
    should find and run it automatically.
    """
    # Dynamically derive a TestCase subclass with the factory callables
    # attached as private attributes the test methods expect.
    return type(r'ifileindextests', (ifileindextests,), {
        r'_makefilefn': makefilefn,
        r'_maketransactionfn': maketransactionfn,
    })
971 971
def makeifiledatatests(makefilefn, maketransactionfn):
    """Create a unittest.TestCase class for the ``ifiledata`` tests.

    Takes the same factory callables as ``makeifileindextests()`` and
    returns a ``unittest.TestCase`` subclass ready for a test loader.
    """
    return type(r'ifiledatatests', (ifiledatatests,), {
        r'_makefilefn': makefilefn,
        r'_maketransactionfn': maketransactionfn,
    })
978 978
def makeifilemutationtests(makefilefn, maketransactionfn):
    """Create a unittest.TestCase class for the ``ifilemutation`` tests.

    Takes the same factory callables as ``makeifileindextests()`` and
    returns a ``unittest.TestCase`` subclass ready for a test loader.
    """
    return type(r'ifilemutationtests', (ifilemutationtests,), {
        r'_makefilefn': makefilefn,
        r'_maketransactionfn': maketransactionfn,
    })
General Comments 0
You need to be logged in to leave comments. Login now