##// END OF EJS Templates
filelog: stop proxying deltaparent() (API)...
Gregory Szorc -
r39912:a269fa55 default
parent child Browse files
Show More
@@ -1,258 +1,255 b''
1 1 # filelog.py - file history class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from . import (
11 11 error,
12 12 repository,
13 13 revlog,
14 14 )
15 15 from .utils import (
16 16 interfaceutil,
17 17 )
18 18
@interfaceutil.implementer(repository.ifilestorage)
class filelog(object):
    """File storage backed by a revlog.

    Thin proxy around a ``revlog`` instance stored at ``data/<path>.i``,
    exposing the ``repository.ifilestorage`` interface. Most methods
    forward directly to the underlying revlog; the interesting logic is
    the handling of copy/rename metadata headers (``\\1\\n`` framing) in
    ``read()``, ``add()``, ``renamed()``, ``size()`` and ``cmp()``.
    """

    def __init__(self, opener, path):
        # Revlog index/data live under the store's "data/" tree.
        self._revlog = revlog.revlog(opener,
                                     '/'.join(('data', path + '.i')),
                                     censorable=True)
        # Full name of the user visible file, relative to the repository root.
        # Used by LFS.
        self._revlog.filename = path

    def __len__(self):
        return len(self._revlog)

    def __iter__(self):
        return self._revlog.__iter__()

    def revs(self, start=0, stop=None):
        return self._revlog.revs(start=start, stop=stop)

    def parents(self, node):
        return self._revlog.parents(node)

    def parentrevs(self, rev):
        return self._revlog.parentrevs(rev)

    def rev(self, node):
        return self._revlog.rev(node)

    def node(self, rev):
        return self._revlog.node(rev)

    def lookup(self, node):
        return self._revlog.lookup(node)

    def linkrev(self, rev):
        return self._revlog.linkrev(rev)

    def commonancestorsheads(self, node1, node2):
        return self._revlog.commonancestorsheads(node1, node2)

    # Used by dagop.blockdescendants().
    def descendants(self, revs):
        return self._revlog.descendants(revs)

    def heads(self, start=None, stop=None):
        return self._revlog.heads(start, stop)

    # Used by hgweb, children extension.
    def children(self, node):
        return self._revlog.children(node)

    def iscensored(self, rev):
        return self._revlog.iscensored(rev)

    # Might be unused.
    def checkhash(self, text, node, p1=None, p2=None, rev=None):
        return self._revlog.checkhash(text, node, p1=p1, p2=p2, rev=rev)

    def revision(self, node, _df=None, raw=False):
        return self._revlog.revision(node, _df=_df, raw=raw)

    def revdiff(self, rev1, rev2):
        return self._revlog.revdiff(rev1, rev2)

    def emitrevisions(self, nodes, nodesorder=None,
                      revisiondata=False, assumehaveparentrevisions=False,
                      deltaprevious=False):
        return self._revlog.emitrevisions(
            nodes, nodesorder=nodesorder, revisiondata=revisiondata,
            assumehaveparentrevisions=assumehaveparentrevisions,
            deltaprevious=deltaprevious)

    def addrevision(self, revisiondata, transaction, linkrev, p1, p2,
                    node=None, flags=revlog.REVIDX_DEFAULT_FLAGS,
                    cachedelta=None):
        return self._revlog.addrevision(revisiondata, transaction, linkrev,
                                        p1, p2, node=node, flags=flags,
                                        cachedelta=cachedelta)

    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
        return self._revlog.addgroup(deltas, linkmapper, transaction,
                                     addrevisioncb=addrevisioncb)

    def getstrippoint(self, minlink):
        return self._revlog.getstrippoint(minlink)

    def strip(self, minlink, transaction):
        return self._revlog.strip(minlink, transaction)

    def censorrevision(self, tr, node, tombstone=b''):
        # NOTE(review): ``tr`` is accepted but not forwarded to the revlog —
        # presumably the underlying censorrevision() does not (yet) take a
        # transaction; confirm against revlog.censorrevision's signature.
        return self._revlog.censorrevision(node, tombstone=tombstone)

    def files(self):
        return self._revlog.files()

    def read(self, node):
        """Return the file fulltext with any copy-metadata header stripped.

        Metadata, when present, is framed between two ``\\1\\n`` markers at
        the start of the stored fulltext.
        """
        t = self.revision(node)
        if not t.startswith('\1\n'):
            return t
        s = t.index('\1\n', 2)
        return t[s + 2:]

    def add(self, text, meta, transaction, link, p1=None, p2=None):
        # Pack a metadata header when metadata exists or when the raw text
        # itself starts with the marker (so it can't be misparsed as one).
        if meta or text.startswith('\1\n'):
            text = revlog.packmeta(meta, text)
        return self.addrevision(text, transaction, link, p1, p2)

    def renamed(self, node):
        """Return (copied-from-path, source-node) or False.

        Copy metadata can only exist on revisions whose first parent is
        null, so anything else short-circuits to False.
        """
        if self.parents(node)[0] != revlog.nullid:
            return False
        t = self.revision(node)
        m = revlog.parsemeta(t)[0]
        # copy and copyrev occur in pairs. In rare cases due to bugs,
        # one can occur without the other.
        if m and "copy" in m and "copyrev" in m:
            return (m["copy"], revlog.bin(m["copyrev"]))
        return False

    def size(self, rev):
        """return the size of a given revision"""

        # for revisions with renames, we have to go the slow way
        node = self.node(rev)
        if self.renamed(node):
            return len(self.read(node))
        if self.iscensored(rev):
            return 0

        # XXX if self.read(node).startswith("\1\n"), this returns (size+4)
        return self._revlog.size(rev)

    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different than what is stored.
        """

        # Escape text that would otherwise be parsed as a metadata header.
        t = text
        if text.startswith('\1\n'):
            t = '\1\n\1\n' + text

        samehashes = not self._revlog.cmp(node, t)
        if samehashes:
            return False

        # censored files compare against the empty file
        if self.iscensored(self.rev(node)):
            return text != ''

        # renaming a file produces a different hash, even if the data
        # remains unchanged. Check if it's the case (slow):
        if self.renamed(node):
            t2 = self.read(node)
            return t2 != text

        return True

    def verifyintegrity(self, state):
        return self._revlog.verifyintegrity(state)

    def storageinfo(self, exclusivefiles=False, sharedfiles=False,
                    revisionscount=False, trackedsize=False,
                    storedsize=False):
        return self._revlog.storageinfo(
            exclusivefiles=exclusivefiles, sharedfiles=sharedfiles,
            revisionscount=revisionscount, trackedsize=trackedsize,
            storedsize=storedsize)

    # TODO these aren't part of the interface and aren't internal methods.
    # Callers should be fixed to not use them.

    # Used by bundlefilelog, unionfilelog.
    @property
    def indexfile(self):
        return self._revlog.indexfile

    @indexfile.setter
    def indexfile(self, value):
        self._revlog.indexfile = value

    # Used by repo upgrade.
    def clone(self, tr, destrevlog, **kwargs):
        if not isinstance(destrevlog, filelog):
            raise error.ProgrammingError('expected filelog to clone()')

        return self._revlog.clone(tr, destrevlog._revlog, **kwargs)
208 205
class narrowfilelog(filelog):
    """Filelog variation to be used with narrow stores.

    Overrides copy-metadata handling so that renames whose source file
    falls outside the narrow matcher are hidden from callers.
    """

    def __init__(self, opener, path, narrowmatch):
        super(narrowfilelog, self).__init__(opener, path)
        # Matcher deciding which paths are inside the narrow spec.
        self._narrowmatch = narrowmatch

    def renamed(self, node):
        res = super(narrowfilelog, self).renamed(node)

        # Renames that come from outside the narrowspec are problematic
        # because we may lack the base text for the rename. This can result
        # in code attempting to walk the ancestry or compute a diff
        # encountering a missing revision. We address this by silently
        # removing rename metadata if the source file is outside the
        # narrow spec.
        #
        # A better solution would be to see if the base revision is available,
        # rather than assuming it isn't.
        #
        # An even better solution would be to teach all consumers of rename
        # metadata that the base revision may not be available.
        #
        # TODO consider better ways of doing this.
        if res and not self._narrowmatch(res[0]):
            # NOTE(review): returns None here while the base class returns
            # False for "no rename"; both are falsy, but callers comparing
            # identity would see a difference — verify no caller does.
            return None

        return res

    def size(self, rev):
        # Because we have a custom renamed() that may lie, we need to call
        # the base renamed() to report accurate results.
        node = self.node(rev)
        if super(narrowfilelog, self).renamed(node):
            return len(self.read(node))
        else:
            return super(narrowfilelog, self).size(rev)

    def cmp(self, node, text):
        different = super(narrowfilelog, self).cmp(node, text)

        # Because renamed() may lie, we may get false positives for
        # different content. Check for this by comparing against the original
        # renamed() implementation.
        if different:
            if super(narrowfilelog, self).renamed(node):
                t2 = self.read(node)
                return t2 != text

        return different
@@ -1,1680 +1,1677 b''
1 1 # repository.py - Interfaces and base classes for repositories and peers.
2 2 #
3 3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from .i18n import _
11 11 from . import (
12 12 error,
13 13 )
14 14 from .utils import (
15 15 interfaceutil,
16 16 )
17 17
# When narrowing is finalized and no longer subject to format changes,
# we should move this to just "narrow" or similar.
NARROW_REQUIREMENT = 'narrowhg-experimental'

# Local repository feature strings.

# Revlogs are being used for file storage.
REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
# The storage part of the repository is shared from an external source.
REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
# LFS supported for backing file storage.
REPO_FEATURE_LFS = b'lfs'
30 30
class ipeerconnection(interfaceutil.Interface):
    """Represents a "connection" to a repository.

    This is the base interface for representing a connection to a repository.
    It holds basic properties and methods applicable to all peer types.

    This is not a complete interface definition and should not be used
    outside of this module.

    (Interface methods deliberately take no ``self``, per the declarative
    interface style used throughout this module.)
    """
    ui = interfaceutil.Attribute("""ui.ui instance""")

    def url():
        """Returns a URL string representing this peer.

        Currently, implementations expose the raw URL used to construct the
        instance. It may contain credentials as part of the URL. The
        expectations of the value aren't well-defined and this could lead to
        data leakage.

        TODO audit/clean consumers and more clearly define the contents of this
        value.
        """

    def local():
        """Returns a local repository instance.

        If the peer represents a local repository, returns an object that
        can be used to interface with it. Otherwise returns ``None``.
        """

    def peer():
        """Returns an object conforming to this interface.

        Most implementations will ``return self``.
        """

    def canpush():
        """Returns a boolean indicating if this peer can be pushed to."""

    def close():
        """Close the connection to this peer.

        This is called when the peer will no longer be used. Resources
        associated with the peer should be cleaned up.
        """
76 76
class ipeercapabilities(interfaceutil.Interface):
    """Peer sub-interface related to capabilities."""

    def capable(name):
        """Determine support for a named capability.

        Returns ``False`` if capability not supported.

        Returns ``True`` if boolean capability is supported. Returns a string
        if capability support is non-boolean (i.e. the capability carries a
        value).

        Capability strings may or may not map to wire protocol capabilities.
        """

    def requirecap(name, purpose):
        """Require a capability to be present.

        Raises a ``CapabilityError`` if the capability isn't present.
        """
96 96
class ipeercommands(interfaceutil.Interface):
    """Client-side interface for communicating over the wire protocol.

    This interface is used as a gateway to the Mercurial wire protocol.
    Methods commonly call wire protocol commands of the same name.
    """

    def branchmap():
        """Obtain heads in named branches.

        Returns a dict mapping branch name to an iterable of nodes that are
        heads on that branch.
        """

    def capabilities():
        """Obtain capabilities of the peer.

        Returns a set of string capabilities.
        """

    def clonebundles():
        """Obtains the clone bundles manifest for the repo.

        Returns the manifest as unparsed bytes.
        """

    def debugwireargs(one, two, three=None, four=None, five=None):
        """Used to facilitate debugging of arguments passed over the wire."""

    def getbundle(source, **kwargs):
        """Obtain remote repository data as a bundle.

        This command is how the bulk of repository data is transferred from
        the peer to the local repository

        Returns a generator of bundle data.
        """

    def heads():
        """Determine all known head revisions in the peer.

        Returns an iterable of binary nodes.
        """

    def known(nodes):
        """Determine whether multiple nodes are known.

        Accepts an iterable of nodes whose presence to check for.

        Returns an iterable of booleans indicating whether the corresponding
        node at that index is known to the peer.
        """

    def listkeys(namespace):
        """Obtain all keys in a pushkey namespace.

        Returns an iterable of key names.
        """

    def lookup(key):
        """Resolve a value to a known revision.

        Returns a binary node of the resolved revision on success.
        """

    def pushkey(namespace, key, old, new):
        """Set a value using the ``pushkey`` protocol.

        Arguments correspond to the pushkey namespace and key to operate on and
        the old and new values for that key.

        Returns a string with the peer result. The value inside varies by the
        namespace.
        """

    def stream_out():
        """Obtain streaming clone data.

        Successful result should be a generator of data chunks.
        """

    def unbundle(bundle, heads, url):
        """Transfer repository data to the peer.

        This is how the bulk of data during a push is transferred.

        Returns the integer number of heads added to the peer.
        """
185 185
class ipeerlegacycommands(interfaceutil.Interface):
    """Interface for implementing support for legacy wire protocol commands.

    Wire protocol commands transition to legacy status when they are no longer
    used by modern clients. To facilitate identifying which commands are
    legacy, the interfaces are split.
    """

    def between(pairs):
        """Obtain nodes between pairs of nodes.

        ``pairs`` is an iterable of node pairs.

        Returns an iterable of iterables of nodes corresponding to each
        requested pair.
        """

    def branches(nodes):
        """Obtain ancestor changesets of specific nodes back to a branch point.

        For each requested node, the peer finds the first ancestor node that is
        a DAG root or is a merge.

        Returns an iterable of iterables with the resolved values for each node.
        """

    def changegroup(nodes, source):
        """Obtain a changegroup with data for descendants of specified nodes."""

    def changegroupsubset(bases, heads, source):
        # TODO(review): undocumented — presumably like changegroup() but
        # limited to revisions between ``bases`` and ``heads``; confirm
        # against the wire protocol implementation before documenting.
        pass
217 217
class ipeercommandexecutor(interfaceutil.Interface):
    """Represents a mechanism to execute remote commands.

    This is the primary interface for requesting that wire protocol commands
    be executed. Instances of this interface are active in a context manager
    and have a well-defined lifetime. When the context manager exits, all
    outstanding requests are waited on.
    """

    def callcommand(name, args):
        """Request that a named command be executed.

        Receives the command name and a dictionary of command arguments.

        Returns a ``concurrent.futures.Future`` that will resolve to the
        result of that command request. That exact value is left up to
        the implementation and possibly varies by command.

        Not all commands can coexist with other commands in an executor
        instance: it depends on the underlying wire protocol transport being
        used and the command itself.

        Implementations MAY call ``sendcommands()`` automatically if the
        requested command can not coexist with other commands in this executor.

        Implementations MAY call ``sendcommands()`` automatically when the
        future's ``result()`` is called. So, consumers using multiple
        commands with an executor MUST ensure that ``result()`` is not called
        until all command requests have been issued.
        """

    def sendcommands():
        """Trigger submission of queued command requests.

        Not all transports submit commands as soon as they are requested to
        run. When called, this method forces queued command requests to be
        issued. It will no-op if all commands have already been sent.

        When called, no more new commands may be issued with this executor.
        """

    def close():
        """Signal that this command request is finished.

        When called, no more new commands may be issued. All outstanding
        commands that have previously been issued are waited on before
        returning. This not only includes waiting for the futures to resolve,
        but also waiting for all response data to arrive. In other words,
        calling this waits for all on-wire state for issued command requests
        to finish.

        When used as a context manager, this method is called when exiting the
        context manager.

        This method may call ``sendcommands()`` if there are buffered commands.
        """
274 274
class ipeerrequests(interfaceutil.Interface):
    """Interface for executing commands on a peer."""

    def commandexecutor():
        """A context manager that resolves to an ipeercommandexecutor.

        The object this resolves to can be used to issue command requests
        to the peer.

        Callers should call its ``callcommand`` method to issue command
        requests.

        A new executor should be obtained for each distinct set of commands
        (possibly just a single command) that the consumer wants to execute
        as part of a single operation or round trip. This is because some
        peers are half-duplex and/or don't support persistent connections.
        e.g. in the case of HTTP peers, commands sent to an executor represent
        a single HTTP request. While some peers may support multiple command
        sends over the wire per executor, consumers need to code to the least
        capable peer. So it should be assumed that command executors buffer
        called commands until they are told to send them and that each
        command executor could result in a new connection or wire-level request
        being issued.
        """
299 299
class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
    """Unified interface for peer repositories.

    Combines the connection, capability, and command-execution
    sub-interfaces. All peer instances must conform to this interface.
    """
305 305
@interfaceutil.implementer(ipeerbase)
class peer(object):
    """Base class for peer repositories."""

    def capable(self, name):
        """Check support for capability ``name``.

        Returns True for a plain boolean capability, the capability's
        string value for a ``name=value`` capability, and False when the
        capability is absent.
        """
        caps = self.capabilities()
        if name in caps:
            return True

        # Valued capabilities are advertised as "name=value"; return the
        # value portion when a matching entry is found.
        prefix = '%s=' % name
        for cap in caps:
            if cap.startswith(prefix):
                return cap[len(prefix):]
        return False

    def requirecap(self, name, purpose):
        """Raise ``CapabilityError`` unless capability ``name`` is present."""
        if not self.capable(name):
            raise error.CapabilityError(
                _('cannot %s; remote repository does not support the %r '
                  'capability') % (purpose, name))
329 329
class iverifyproblem(interfaceutil.Interface):
    """Represents a problem with the integrity of the repository.

    Instances of this interface are emitted to describe an integrity issue
    with a repository (e.g. corrupt storage, missing data, etc).

    Instances are essentially messages associated with severity.
    """
    warning = interfaceutil.Attribute(
        """Message indicating a non-fatal problem.""")

    error = interfaceutil.Attribute(
        """Message indicating a fatal problem.""")

    node = interfaceutil.Attribute(
        """Revision encountering the problem.

        ``None`` means the problem doesn't apply to a single revision.
        """)
349 349
class irevisiondelta(interfaceutil.Interface):
    """Represents a delta between one revision and another.

    Instances convey enough information to allow a revision to be exchanged
    with another repository.

    Instances represent the fulltext revision data or a delta against
    another revision. Therefore the ``revision`` and ``delta`` attributes
    are mutually exclusive.

    Typically used for changegroup generation.
    """

    node = interfaceutil.Attribute(
        """20 byte node of this revision.""")

    p1node = interfaceutil.Attribute(
        """20 byte node of 1st parent of this revision.""")

    p2node = interfaceutil.Attribute(
        """20 byte node of 2nd parent of this revision.""")

    linknode = interfaceutil.Attribute(
        """20 byte node of the changelog revision this node is linked to.""")

    flags = interfaceutil.Attribute(
        """2 bytes of integer flags that apply to this revision.""")

    basenode = interfaceutil.Attribute(
        """20 byte node of the revision this data is a delta against.

        ``nullid`` indicates that the revision is a full revision and not
        a delta.
        """)

    baserevisionsize = interfaceutil.Attribute(
        """Size of base revision this delta is against.

        May be ``None`` if ``basenode`` is ``nullid``.
        """)

    revision = interfaceutil.Attribute(
        """Raw fulltext of revision data for this node.""")

    delta = interfaceutil.Attribute(
        """Delta between ``basenode`` and ``node``.

        Stored in the bdiff delta format.
        """)
399 399
class ifilerevisionssequence(interfaceutil.Interface):
    """Contains index data for all revisions of a file.

    Types implementing this behave like lists of tuples. The index
    in the list corresponds to the revision number. The values contain
    index metadata.

    The *null* revision (revision number -1) is always the last item
    in the index.
    """

    def __len__():
        """The total number of revisions."""

    def __getitem__(rev):
        """Returns the object having a specific revision number.

        Returns an 8-tuple with the following fields:

        offset+flags
           Contains the offset and flags for the revision. 64-bit unsigned
           integer where first 6 bytes are the offset and the next 2 bytes
           are flags. The offset can be 0 if it is not used by the store.
        compressed size
            Size of the revision data in the store. It can be 0 if it isn't
            needed by the store.
        uncompressed size
            Fulltext size. It can be 0 if it isn't needed by the store.
        base revision
            Revision number of revision the delta for storage is encoded
            against. -1 indicates not encoded against a base revision.
        link revision
            Revision number of changelog revision this entry is related to.
        p1 revision
            Revision number of 1st parent. -1 if no 1st parent.
        p2 revision
            Revision number of 2nd parent. -1 if no 2nd parent.
        node
            Binary node value for this revision number.

        Negative values should index off the end of the sequence. ``-1``
        should return the null revision. ``-2`` should return the most
        recent revision.
        """

    def __contains__(rev):
        """Whether a revision number exists."""

    # NOTE(review): this is the only method in the interface declared with
    # an explicit ``self``, unlike its siblings — looks like an oversight;
    # confirm against implementers before normalizing.
    def insert(self, i, entry):
        """Add an item to the index at specific revision."""
450 450
class ifileindex(interfaceutil.Interface):
    """Storage interface for index data of a single file.

    File storage data is divided into index metadata and data storage.
    This interface defines the index portion of the interface.

    The index logically consists of:

    * A mapping between revision numbers and nodes.
    * DAG data (storing and querying the relationship between nodes).
    * Metadata to facilitate storage.
    """
    def __len__():
        """Obtain the number of revisions stored for this file."""

    def __iter__():
        """Iterate over revision numbers for this file."""

    def revs(start=0, stop=None):
        """Iterate over revision numbers for this file, with control."""

    def parents(node):
        """Returns a 2-tuple of parent nodes for a revision.

        Values will be ``nullid`` if the parent is empty.
        """

    def parentrevs(rev):
        """Like parents() but operates on revision numbers."""

    def rev(node):
        """Obtain the revision number given a node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def node(rev):
        """Obtain the node value given a revision number.

        Raises ``IndexError`` if the node is not known.
        """

    def lookup(node):
        """Attempt to resolve a value to a node.

        Value can be a binary node, hex node, revision number, or a string
        that can be converted to an integer.

        Raises ``error.LookupError`` if a node could not be resolved.
        """

    def linkrev(rev):
        """Obtain the changeset revision number a revision is linked to."""

    def iscensored(rev):
        """Return whether a revision's content has been censored."""

    def commonancestorsheads(node1, node2):
        """Obtain an iterable of nodes containing heads of common ancestors.

        See ``ancestor.commonancestorsheads()``.
        """

    def descendants(revs):
        """Obtain descendant revision numbers for a set of revision numbers.

        If ``nullrev`` is in the set, this is equivalent to ``revs()``.
        """

    def heads(start=None, stop=None):
        """Obtain a list of nodes that are DAG heads, with control.

        The set of revisions examined can be limited by specifying
        ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
        iterable of nodes. DAG traversal starts at earlier revision
        ``start`` and iterates forward until any node in ``stop`` is
        encountered.
        """

    def children(node):
        """Obtain nodes that are children of a node.

        Returns a list of nodes.
        """
class ifiledata(interfaceutil.Interface):
    """Storage interface for data storage of a specific file.

    This complements ``ifileindex`` and provides an interface for accessing
    data for a tracked file.
    """
    def size(rev):
        """Obtain the fulltext size of file data.

        Any metadata is excluded from size measurements.
        """

    def checkhash(fulltext, node, p1=None, p2=None, rev=None):
        """Validate the stored hash of a given fulltext and node.

        Raises ``error.StorageError`` if hash validation fails.
        """

    def revision(node, raw=False):
        """Obtain fulltext data for a node.

        By default, any storage transformations are applied before the data
        is returned. If ``raw`` is True, non-raw storage transformations
        are not applied.

        The fulltext data may contain a header containing metadata. Most
        consumers should use ``read()`` to obtain the actual file data.
        """

    def read(node):
        """Resolve file fulltext data.

        This is similar to ``revision()`` except any metadata in the data
        headers is stripped.
        """

    def renamed(node):
        """Obtain copy metadata for a node.

        Returns ``False`` if no copy metadata is stored or a 2-tuple of
        (path, node) from which this revision was copied.
        """

    def cmp(node, fulltext):
        """Compare fulltext to another revision.

        Returns True if the fulltext is different from what is stored.

        This takes copy metadata into account.

        TODO better document the copy metadata and censoring logic.
        """

    def revdiff(rev1, rev2):
        """Obtain a delta between two revision numbers.

        Operates on raw data in the store (``revision(node, raw=True)``).

        The returned data is the result of ``bdiff.bdiff`` on the raw
        revision data.
        """

    def emitrevisions(nodes,
                      nodesorder=None,
                      revisiondata=False,
                      assumehaveparentrevisions=False,
                      deltaprevious=False):
        """Produce ``irevisiondelta`` for revisions.

        Given an iterable of nodes, emits objects conforming to the
        ``irevisiondelta`` interface that describe revisions in storage.

        This method is a generator.

        The input nodes may be unordered. Implementations must ensure that a
        node's parents are emitted before the node itself. Transitively, this
        means that a node may only be emitted once all its ancestors in
        ``nodes`` have also been emitted.

        By default, emits "index" data (the ``node``, ``p1node``, and
        ``p2node`` attributes). If ``revisiondata`` is set, revision data
        will also be present on the emitted objects.

        With default argument values, implementations can choose to emit
        either fulltext revision data or a delta. When emitting deltas,
        implementations must consider whether the delta's base revision
        fulltext is available to the receiver.

        The base revision fulltext is guaranteed to be available if any of
        the following are met:

        * Its fulltext revision was emitted by this method call.
        * A delta for that revision was emitted by this method call.
        * ``assumehaveparentrevisions`` is True and the base revision is a
          parent of the node.

        ``nodesorder`` can be used to control the order that revisions are
        emitted. By default, revisions can be reordered as long as they are
        in DAG topological order (see above). If the value is ``nodes``,
        the iteration order from ``nodes`` should be used. If the value is
        ``storage``, then the native order from the backing storage layer
        is used. (Not all storage layers will have strong ordering and behavior
        of this mode is storage-dependent.) ``nodes`` ordering can force
        revisions to be emitted before their ancestors, so consumers should
        use it with care.

        The ``linknode`` attribute on the returned ``irevisiondelta`` may not
        be set and it is the caller's responsibility to resolve it, if needed.

        If ``deltaprevious`` is True and revision data is requested, all
        revision data should be emitted as deltas against the revision
        emitted just prior. The initial revision should be a delta against
        its 1st parent.
        """
class ifilemutation(interfaceutil.Interface):
    """Storage interface for mutation events of a tracked file."""

    def add(filedata, meta, transaction, linkrev, p1, p2):
        """Add a new revision to the store.

        Takes file data, dictionary of metadata, a transaction, linkrev,
        and parent nodes.

        Returns the node that was added.

        May no-op if a revision matching the supplied data is already stored.
        """

    def addrevision(revisiondata, transaction, linkrev, p1, p2, node=None,
                    flags=0, cachedelta=None):
        """Add a new revision to the store.

        This is similar to ``add()`` except it operates at a lower level.

        The data passed in already contains a metadata header, if any.

        ``node`` and ``flags`` can be used to define the expected node and
        the flags to use with storage.

        ``add()`` is usually called when adding files from e.g. the working
        directory. ``addrevision()`` is often called by ``add()`` and for
        scenarios where revision data has already been computed, such as when
        applying raw data from a peer repo.
        """

    def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
        """Process a series of deltas for storage.

        ``deltas`` is an iterable of 7-tuples of
        (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
        to add.

        ``linkmapper`` resolves changelog linkage for each revision
        (NOTE(review): presumably maps a linknode to a linkrev -- confirm
        against implementations).

        The ``delta`` field contains ``mpatch`` data to apply to a base
        revision, identified by ``deltabase``. The base node can be
        ``nullid``, in which case the header from the delta can be ignored
        and the delta used as the fulltext.

        ``addrevisioncb`` should be called for each node as it is committed.

        Returns a list of nodes that were processed. A node will be in the list
        even if it existed in the store previously.
        """

    def censorrevision(tr, node, tombstone=b''):
        """Remove the content of a single revision.

        The specified ``node`` will have its content purged from storage.
        Future attempts to access the revision data for this node will
        result in failure.

        A ``tombstone`` message can optionally be stored. This message may be
        displayed to users when they attempt to access the missing revision
        data.

        Storage backends may have stored deltas against the previous content
        in this revision. As part of censoring a revision, these storage
        backends are expected to rewrite any internally stored deltas such
        that they no longer reference the deleted content.
        """

    def getstrippoint(minlink):
        """Find the minimum revision that must be stripped to strip a linkrev.

        Returns a 2-tuple containing the minimum revision number and a set
        of all revision numbers that would be broken by this strip.

        TODO this is highly revlog centric and should be abstracted into
        a higher-level deletion API. ``repair.strip()`` relies on this.
        """

    def strip(minlink, transaction):
        """Remove storage of items starting at a linkrev.

        This uses ``getstrippoint()`` to determine the first node to remove.
        Then it effectively truncates storage for all revisions after that.

        TODO this is highly revlog centric and should be abstracted into a
        higher-level deletion API.
        """
739 736
class ifilestorage(ifileindex, ifiledata, ifilemutation):
    """Complete storage interface for a single tracked file."""

    def files():
        """Obtain paths that are backing storage for this file.

        TODO this is used heavily by verify code and there should probably
        be a better API for that.
        """

    def storageinfo(exclusivefiles=False, sharedfiles=False,
                    revisionscount=False, trackedsize=False,
                    storedsize=False):
        """Obtain information about storage for this file's data.

        Returns a dict describing storage for this tracked path. The keys
        in the dict map to arguments of the same name. The arguments are
        bools indicating whether to calculate and obtain that data.

        exclusivefiles
           Iterable of (vfs, path) describing files that are exclusively
           used to back storage for this tracked path.

        sharedfiles
           Iterable of (vfs, path) describing files that are used to back
           storage for this tracked path. Those files may also provide storage
           for other stored entities.

        revisionscount
           Number of revisions available for retrieval.

        trackedsize
           Total size in bytes of all tracked revisions. This is a sum of the
           length of the fulltext of all revisions.

        storedsize
           Total size in bytes used to store data for all tracked revisions.
           This is commonly less than ``trackedsize`` due to internal usage
           of deltas rather than fulltext revisions.

        Not all storage backends may support all queries or have a reasonable
        value to use. In that case, the value should be set to ``None`` and
        callers are expected to handle this special value.
        """

    def verifyintegrity(state):
        """Verifies the integrity of file storage.

        ``state`` is a dict holding state of the verifier process. It can be
        used to communicate data between invocations of multiple storage
        primitives.

        If individual revisions cannot have their revision content resolved,
        the method is expected to set the ``skipread`` key to a set of nodes
        that encountered problems.

        The method yields objects conforming to the ``iverifyproblem``
        interface.
        """
799 796
class idirs(interfaceutil.Interface):
    """Interface representing a collection of directories from paths.

    This interface is essentially a derived data structure representing
    directories from a collection of paths.

    NOTE(review): ``delpath`` only drops a directory once its last
    contributing path is removed, which suggests implementations
    reference-count directories -- confirm against implementations.
    """

    def addpath(path):
        """Add a path to the collection.

        All directories in the path will be added to the collection.
        """

    def delpath(path):
        """Remove a path from the collection.

        If the removal was the last path in a particular directory, the
        directory is removed from the collection.
        """

    def __iter__():
        """Iterate over the directories in this collection of paths."""

    def __contains__(path):
        """Whether a specific directory is in this collection."""
825 822
class imanifestdict(interfaceutil.Interface):
    """Interface representing a manifest data structure.

    A manifest is effectively a dict mapping paths to entries. Each entry
    consists of a binary node and extra flags affecting that entry.
    """

    def __getitem__(path):
        """Returns the binary node value for a path in the manifest.

        Raises ``KeyError`` if the path does not exist in the manifest.

        Equivalent to ``self.find(path)[0]``.
        """

    def find(path):
        """Returns the entry for a path in the manifest.

        Returns a 2-tuple of (node, flags).

        Raises ``KeyError`` if the path does not exist in the manifest.
        """

    def __len__():
        """Return the number of entries in the manifest."""

    def __nonzero__():
        """Returns True if the manifest has entries, False otherwise."""

    __bool__ = __nonzero__

    def __setitem__(path, node):
        """Define the node value for a path in the manifest.

        If the path is already in the manifest, its flags will be copied to
        the new entry.
        """

    def __contains__(path):
        """Whether a path exists in the manifest."""

    def __delitem__(path):
        """Remove a path from the manifest.

        Raises ``KeyError`` if the path is not in the manifest.
        """

    def __iter__():
        """Iterate over paths in the manifest."""

    def iterkeys():
        """Iterate over paths in the manifest."""

    def keys():
        """Obtain a list of paths in the manifest."""

    def filesnotin(other, match=None):
        """Obtain the set of paths in this manifest but not in another.

        ``match`` is an optional matcher function to be applied to both
        manifests.

        Returns a set of paths.
        """

    def dirs():
        """Returns an object implementing the ``idirs`` interface."""

    def hasdir(dir):
        """Returns a bool indicating if a directory is in this manifest."""

    def matches(match):
        """Generate a new manifest filtered through a matcher.

        Returns an object conforming to the ``imanifestdict`` interface.
        """

    def walk(match):
        """Generator of paths in manifest satisfying a matcher.

        This is equivalent to ``self.matches(match).iterkeys()`` except a new
        manifest object is not created.

        If the matcher has explicit files listed and they don't exist in
        the manifest, ``match.bad()`` is called for each missing file.
        """

    def diff(other, match=None, clean=False):
        """Find differences between this manifest and another.

        This manifest is compared to ``other``.

        If ``match`` is provided, the two manifests are filtered against this
        matcher and only entries satisfying the matcher are compared.

        If ``clean`` is True, unchanged files are included in the returned
        object.

        Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
        the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
        represents the node and flags for this manifest and ``(node2, flag2)``
        are the same for the other manifest.
        """

    def setflag(path, flag):
        """Set the flag value for a given path.

        Raises ``KeyError`` if the path is not already in the manifest.
        """

    def get(path, default=None):
        """Obtain the node value for a path or a default value if missing."""

    def flags(path, default=''):
        """Return the flags value for a path or a default value if missing."""

    def copy():
        """Return a copy of this manifest."""

    def items():
        """Returns an iterable of (path, node) for items in this manifest."""

    def iteritems():
        """Identical to items()."""

    def iterentries():
        """Returns an iterable of (path, node, flags) for this manifest.

        Similar to ``iteritems()`` except items are a 3-tuple and include
        flags.
        """

    def text():
        """Obtain the raw data representation for this manifest.

        Result is used to create a manifest revision.
        """

    def fastdelta(base, changes):
        """Obtain a delta between this manifest and another given changes.

        ``base`` is the raw data representation for another manifest.

        ``changes`` is an iterable of ``(path, to_delete)``.

        Returns a 2-tuple containing ``bytearray(self.text())`` and the
        delta between ``base`` and this manifest.
        """
974 971
class imanifestrevisionbase(interfaceutil.Interface):
    """Base interface representing a single revision of a manifest.

    Should not be used as a primary interface: should always be inherited
    as part of a larger interface.
    """

    def new():
        """Obtain a new manifest instance.

        NOTE(review): unlike ``copy()``, this presumably starts from an
        empty manifest -- confirm against implementations.

        Returns an object conforming to the ``imanifestrevisionwritable``
        interface. The instance will be associated with the same
        ``imanifestlog`` collection as this instance.
        """

    def copy():
        """Obtain a copy of this manifest instance.

        Returns an object conforming to the ``imanifestrevisionwritable``
        interface. The instance will be associated with the same
        ``imanifestlog`` collection as this instance.
        """

    def read():
        """Obtain the parsed manifest data structure.

        The returned object conforms to the ``imanifestdict`` interface.
        """
1003 1000
class imanifestrevisionstored(imanifestrevisionbase):
    """Interface representing a manifest revision committed to storage."""

    def node():
        """The binary node for this manifest."""

    parents = interfaceutil.Attribute(
        """List of binary nodes that are parents for this manifest revision."""
    )

    def readdelta(shallow=False):
        """Obtain the manifest data structure representing changes from parent.

        This manifest is compared to its 1st parent. A new manifest representing
        those differences is constructed.

        NOTE(review): ``shallow`` is undocumented here; for tree manifests it
        presumably restricts the result to this directory level -- confirm
        against implementations.

        The returned object conforms to the ``imanifestdict`` interface.
        """

    def readfast(shallow=False):
        """Calls either ``read()`` or ``readdelta()``.

        The faster of the two options is called.
        """

    def find(key):
        """Calls ``self.read().find(key)``.

        Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
        """
1034 1031
class imanifestrevisionwritable(imanifestrevisionbase):
    """Interface representing a manifest revision that can be committed."""

    def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
        """Add this revision to storage.

        Takes a transaction object, the changeset revision number it will
        be associated with, its parent nodes, and lists of added and
        removed paths.

        If ``match`` is provided, storage can choose not to inspect or write
        out items that do not match. Storage is still required to be able to
        provide the full manifest in the future for any directories written
        (these manifests should not be "narrowed on disk").

        Returns the binary node of the created revision.
        """
1052 1049
class imanifeststorage(interfaceutil.Interface):
    """Storage interface for manifest data."""

    tree = interfaceutil.Attribute(
        """The path to the directory this manifest tracks.

        The empty bytestring represents the root manifest.
        """)

    index = interfaceutil.Attribute(
        """An ``ifilerevisionssequence`` instance.""")

    indexfile = interfaceutil.Attribute(
        """Path of revlog index file.

        TODO this is revlog specific and should not be exposed.
        """)

    opener = interfaceutil.Attribute(
        """VFS opener to use to access underlying files used for storage.

        TODO this is revlog specific and should not be exposed.
        """)

    version = interfaceutil.Attribute(
        """Revlog version number.

        TODO this is revlog specific and should not be exposed.
        """)

    _generaldelta = interfaceutil.Attribute(
        """Whether generaldelta storage is being used.

        TODO this is revlog specific and should not be exposed.
        """)

    fulltextcache = interfaceutil.Attribute(
        """Dict with cache of fulltexts.

        TODO this doesn't feel appropriate for the storage interface.
        """)

    def __len__():
        """Obtain the number of revisions stored for this manifest."""

    def __iter__():
        """Iterate over revision numbers for this manifest."""

    def rev(node):
        """Obtain the revision number given a binary node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def node(rev):
        """Obtain the node value given a revision number.

        Raises ``error.LookupError`` if the revision is not known.
        """

    def lookup(value):
        """Attempt to resolve a value to a node.

        Value can be a binary node, hex node, revision number, or a bytes
        that can be converted to an integer.

        Raises ``error.LookupError`` if a node could not be resolved.

        TODO this is only used by debug* commands and can probably be deleted
        easily.
        """

    def parents(node):
        """Returns a 2-tuple of parent nodes for a node.

        Values will be ``nullid`` if the parent is empty.
        """

    def parentrevs(rev):
        """Like parents() but operates on revision numbers."""

    def linkrev(rev):
        """Obtain the changeset revision number a revision is linked to."""

    def revision(node, _df=None, raw=False):
        """Obtain fulltext data for a node."""

    def revdiff(rev1, rev2):
        """Obtain a delta between two revision numbers.

        The returned data is the result of ``bdiff.bdiff()`` on the raw
        revision data.
        """

    def cmp(node, fulltext):
        """Compare fulltext to another revision.

        Returns True if the fulltext is different from what is stored.
        """

    def emitrevisions(nodes,
                      nodesorder=None,
                      revisiondata=False,
                      assumehaveparentrevisions=False):
        """Produce ``irevisiondelta`` describing revisions.

        See the documentation for ``ifiledata`` for more.
        """

    def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
        """Process a series of deltas for storage.

        See the documentation in ``ifilemutation`` for more.
        """

    def rawsize(rev):
        """Obtain the size of tracked data.

        Is equivalent to ``len(m.revision(node, raw=True))``.

        TODO this method is only used by upgrade code and may be removed.
        """

    def getstrippoint(minlink):
        """Find minimum revision that must be stripped to strip a linkrev.

        See the documentation in ``ifilemutation`` for more.
        """

    def strip(minlink, transaction):
        """Remove storage of items starting at a linkrev.

        See the documentation in ``ifilemutation`` for more.
        """

    def checksize():
        """Obtain the expected sizes of backing files.

        TODO this is used by verify and it should not be part of the interface.
        """

    def files():
        """Obtain paths that are backing storage for this manifest.

        TODO this is used by verify and there should probably be a better API
        for this functionality.
        """

    def deltaparent(rev):
        """Obtain the revision that a revision is delta'd against.

        TODO delta encoding is an implementation detail of storage and should
        not be exposed to the storage interface.
        """

    def clone(tr, dest, **kwargs):
        """Clone this instance to another."""

    def clearcaches(clear_persisted_data=False):
        """Clear any caches associated with this instance."""

    def dirlog(d):
        """Obtain a manifest storage instance for a tree."""

    def add(m, transaction, link, p1, p2, added, removed, readtree=None,
            match=None):
        """Add a revision to storage.

        ``m`` is an object conforming to ``imanifestdict``.

        ``link`` is the linkrev revision number.

        ``p1`` and ``p2`` are the parent revision numbers.

        ``added`` and ``removed`` are iterables of added and removed paths,
        respectively.

        ``readtree`` is a function that can be used to read the child tree(s)
        when recursively writing the full tree structure when using
        treemanifests.

        ``match`` is a matcher that can be used to hint to storage that not all
        paths must be inspected; this is an optimization and can be safely
        ignored. Note that the storage must still be able to reproduce a full
        manifest including files that did not match.
        """

    def storageinfo(exclusivefiles=False, sharedfiles=False,
                    revisionscount=False, trackedsize=False,
                    storedsize=False):
        """Obtain information about storage for this manifest's data.

        See ``ifilestorage.storageinfo()`` for a description of this method.
        This one behaves the same way, except for manifest data.
        """
1248 1245
class imanifestlog(interfaceutil.Interface):
    """Interface representing a collection of manifest snapshots.

    Represents the root manifest in a repository.

    Also serves as a means to access nested tree manifests and to cache
    tree manifests.
    """

    def __getitem__(node):
        """Obtain a manifest instance for a given binary node.

        Equivalent to calling ``self.get('', node)``.

        The returned object conforms to the ``imanifestrevisionstored``
        interface.
        """

    def get(tree, node, verify=True):
        """Retrieve the manifest instance for a given directory and binary node.

        ``node`` always refers to the node of the root manifest (which will be
        the only manifest if flat manifests are being used).

        If ``tree`` is the empty string, the root manifest is returned.
        Otherwise the manifest for the specified directory will be returned
        (requires tree manifests).

        If ``verify`` is True, ``LookupError`` is raised if the node is not
        known.

        The returned object conforms to the ``imanifestrevisionstored``
        interface.
        """

    def getstorage(tree):
        """Retrieve an interface to storage for a particular tree.

        If ``tree`` is the empty bytestring, storage for the root manifest will
        be returned. Otherwise storage for a tree manifest is returned.

        NOTE(review): the returned object appears to be an
        ``imanifeststorage`` -- the TODO below tracks formalizing that.

        TODO formalize interface for returned object.
        """

    def clearcaches():
        """Clear caches associated with this collection."""

    def rev(node):
        """Obtain the revision number for a binary node.

        Raises ``error.LookupError`` if the node is not known.
        """
1301 1298
class ilocalrepositoryfilestorage(interfaceutil.Interface):
    """Local repository sub-interface providing access to tracked file storage.

    This interface defines how a repository accesses storage for a single
    tracked file path.
    """

    def file(f):
        """Obtain a filelog for a tracked path.

        ``f`` is the path of the tracked file.

        The returned type conforms to the ``ifilestorage`` interface.
        """
1314 1311
1315 1312 class ilocalrepositorymain(interfaceutil.Interface):
1316 1313 """Main interface for local repositories.
1317 1314
1318 1315 This currently captures the reality of things - not how things should be.
1319 1316 """
1320 1317
1321 1318 supportedformats = interfaceutil.Attribute(
1322 1319 """Set of requirements that apply to stream clone.
1323 1320
1324 1321 This is actually a class attribute and is shared among all instances.
1325 1322 """)
1326 1323
1327 1324 supported = interfaceutil.Attribute(
1328 1325 """Set of requirements that this repo is capable of opening.""")
1329 1326
1330 1327 requirements = interfaceutil.Attribute(
1331 1328 """Set of requirements this repo uses.""")
1332 1329
1333 1330 features = interfaceutil.Attribute(
1334 1331 """Set of "features" this repository supports.
1335 1332
1336 1333 A "feature" is a loosely-defined term. It can refer to a feature
1337 1334 in the classical sense or can describe an implementation detail
1338 1335 of the repository. For example, a ``readonly`` feature may denote
1339 1336 the repository as read-only. Or a ``revlogfilestore`` feature may
1340 1337 denote that the repository is using revlogs for file storage.
1341 1338
1342 1339 The intent of features is to provide a machine-queryable mechanism
1343 1340 for repo consumers to test for various repository characteristics.
1344 1341
1345 1342 Features are similar to ``requirements``. The main difference is that
1346 1343 requirements are stored on-disk and represent requirements to open the
1347 1344 repository. Features are more run-time capabilities of the repository
1348 1345 and more granular capabilities (which may be derived from requirements).
1349 1346 """)
1350 1347
1351 1348 filtername = interfaceutil.Attribute(
1352 1349 """Name of the repoview that is active on this repo.""")
1353 1350
1354 1351 wvfs = interfaceutil.Attribute(
1355 1352 """VFS used to access the working directory.""")
1356 1353
1357 1354 vfs = interfaceutil.Attribute(
1358 1355 """VFS rooted at the .hg directory.
1359 1356
1360 1357 Used to access repository data not in the store.
1361 1358 """)
1362 1359
1363 1360 svfs = interfaceutil.Attribute(
1364 1361 """VFS rooted at the store.
1365 1362
1366 1363 Used to access repository data in the store. Typically .hg/store.
1367 1364 But can point elsewhere if the store is shared.
1368 1365 """)
1369 1366
1370 1367 root = interfaceutil.Attribute(
1371 1368 """Path to the root of the working directory.""")
1372 1369
1373 1370 path = interfaceutil.Attribute(
1374 1371 """Path to the .hg directory.""")
1375 1372
1376 1373 origroot = interfaceutil.Attribute(
1377 1374 """The filesystem path that was used to construct the repo.""")
1378 1375
1379 1376 auditor = interfaceutil.Attribute(
1380 1377 """A pathauditor for the working directory.
1381 1378
1382 1379 This checks if a path refers to a nested repository.
1383 1380
1384 1381 Operates on the filesystem.
1385 1382 """)
1386 1383
1387 1384 nofsauditor = interfaceutil.Attribute(
1388 1385 """A pathauditor for the working directory.
1389 1386
1390 1387 This is like ``auditor`` except it doesn't do filesystem checks.
1391 1388 """)
1392 1389
1393 1390 baseui = interfaceutil.Attribute(
1394 1391 """Original ui instance passed into constructor.""")
1395 1392
1396 1393 ui = interfaceutil.Attribute(
1397 1394 """Main ui instance for this instance.""")
1398 1395
1399 1396 sharedpath = interfaceutil.Attribute(
1400 1397 """Path to the .hg directory of the repo this repo was shared from.""")
1401 1398
1402 1399 store = interfaceutil.Attribute(
1403 1400 """A store instance.""")
1404 1401
1405 1402 spath = interfaceutil.Attribute(
1406 1403 """Path to the store.""")
1407 1404
1408 1405 sjoin = interfaceutil.Attribute(
1409 1406 """Alias to self.store.join.""")
1410 1407
1411 1408 cachevfs = interfaceutil.Attribute(
1412 1409 """A VFS used to access the cache directory.
1413 1410
1414 1411 Typically .hg/cache.
1415 1412 """)
1416 1413
1417 1414 filteredrevcache = interfaceutil.Attribute(
1418 1415 """Holds sets of revisions to be filtered.""")
1419 1416
1420 1417 names = interfaceutil.Attribute(
1421 1418 """A ``namespaces`` instance.""")
1422 1419
1423 1420 def close():
1424 1421 """Close the handle on this repository."""
1425 1422
1426 1423 def peer():
1427 1424 """Obtain an object conforming to the ``peer`` interface."""
1428 1425
1429 1426 def unfiltered():
1430 1427 """Obtain an unfiltered/raw view of this repo."""
1431 1428
1432 1429 def filtered(name, visibilityexceptions=None):
1433 1430 """Obtain a named view of this repository."""
1434 1431
1435 1432 obsstore = interfaceutil.Attribute(
1436 1433 """A store of obsolescence data.""")
1437 1434
1438 1435 changelog = interfaceutil.Attribute(
1439 1436 """A handle on the changelog revlog.""")
1440 1437
1441 1438 manifestlog = interfaceutil.Attribute(
1442 1439 """An instance conforming to the ``imanifestlog`` interface.
1443 1440
1444 1441 Provides access to manifests for the repository.
1445 1442 """)
1446 1443
1447 1444 dirstate = interfaceutil.Attribute(
1448 1445 """Working directory state.""")
1449 1446
1450 1447 narrowpats = interfaceutil.Attribute(
1451 1448 """Matcher patterns for this repository's narrowspec.""")
1452 1449
1453 1450 def narrowmatch():
1454 1451 """Obtain a matcher for the narrowspec."""
1455 1452
1456 1453 def setnarrowpats(newincludes, newexcludes):
1457 1454 """Define the narrowspec for this repository."""
1458 1455
1459 1456 def __getitem__(changeid):
1460 1457 """Try to resolve a changectx."""
1461 1458
1462 1459 def __contains__(changeid):
1463 1460 """Whether a changeset exists."""
1464 1461
1465 1462 def __nonzero__():
1466 1463 """Always returns True."""
1467 1464 return True
1468 1465
1469 1466 __bool__ = __nonzero__
1470 1467
1471 1468 def __len__():
1472 1469 """Returns the number of changesets in the repo."""
1473 1470
1474 1471 def __iter__():
1475 1472 """Iterate over revisions in the changelog."""
1476 1473
1477 1474 def revs(expr, *args):
1478 1475 """Evaluate a revset.
1479 1476
1480 1477 Emits revisions.
1481 1478 """
1482 1479
1483 1480 def set(expr, *args):
1484 1481 """Evaluate a revset.
1485 1482
1486 1483 Emits changectx instances.
1487 1484 """
1488 1485
1489 1486 def anyrevs(specs, user=False, localalias=None):
1490 1487 """Find revisions matching one of the given revsets."""
1491 1488
1492 1489 def url():
1493 1490 """Returns a string representing the location of this repo."""
1494 1491
1495 1492 def hook(name, throw=False, **args):
1496 1493 """Call a hook."""
1497 1494
1498 1495 def tags():
1499 1496 """Return a mapping of tag to node."""
1500 1497
1501 1498 def tagtype(tagname):
1502 1499 """Return the type of a given tag."""
1503 1500
1504 1501 def tagslist():
1505 1502 """Return a list of tags ordered by revision."""
1506 1503
1507 1504 def nodetags(node):
1508 1505 """Return the tags associated with a node."""
1509 1506
1510 1507 def nodebookmarks(node):
1511 1508 """Return the list of bookmarks pointing to the specified node."""
1512 1509
1513 1510 def branchmap():
1514 1511 """Return a mapping of branch to heads in that branch."""
1515 1512
1516 1513 def revbranchcache():
1517 1514 pass
1518 1515
1519 1516 def branchtip(branchtip, ignoremissing=False):
1520 1517 """Return the tip node for a given branch."""
1521 1518
1522 1519 def lookup(key):
1523 1520 """Resolve the node for a revision."""
1524 1521
1525 1522 def lookupbranch(key):
1526 1523 """Look up the branch name of the given revision or branch name."""
1527 1524
1528 1525 def known(nodes):
1529 1526 """Determine whether a series of nodes is known.
1530 1527
1531 1528 Returns a list of bools.
1532 1529 """
1533 1530
1534 1531 def local():
1535 1532 """Whether the repository is local."""
1536 1533 return True
1537 1534
1538 1535 def publishing():
1539 1536 """Whether the repository is a publishing repository."""
1540 1537
1541 1538 def cancopy():
1542 1539 pass
1543 1540
1544 1541 def shared():
1545 1542 """The type of shared repository or None."""
1546 1543
1547 1544 def wjoin(f, *insidef):
1548 1545 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
1549 1546
1550 1547 def setparents(p1, p2):
1551 1548 """Set the parent nodes of the working directory."""
1552 1549
1553 1550 def filectx(path, changeid=None, fileid=None):
1554 1551 """Obtain a filectx for the given file revision."""
1555 1552
1556 1553 def getcwd():
1557 1554 """Obtain the current working directory from the dirstate."""
1558 1555
1559 1556 def pathto(f, cwd=None):
1560 1557 """Obtain the relative path to a file."""
1561 1558
1562 1559 def adddatafilter(name, fltr):
1563 1560 pass
1564 1561
1565 1562 def wread(filename):
1566 1563 """Read a file from wvfs, using data filters."""
1567 1564
1568 1565 def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
1569 1566 """Write data to a file in the wvfs, using data filters."""
1570 1567
1571 1568 def wwritedata(filename, data):
1572 1569 """Resolve data for writing to the wvfs, using data filters."""
1573 1570
1574 1571 def currenttransaction():
1575 1572 """Obtain the current transaction instance or None."""
1576 1573
1577 1574 def transaction(desc, report=None):
1578 1575 """Open a new transaction to write to the repository."""
1579 1576
1580 1577 def undofiles():
1581 1578 """Returns a list of (vfs, path) for files to undo transactions."""
1582 1579
1583 1580 def recover():
1584 1581 """Roll back an interrupted transaction."""
1585 1582
1586 1583 def rollback(dryrun=False, force=False):
1587 1584 """Undo the last transaction.
1588 1585
1589 1586 DANGEROUS.
1590 1587 """
1591 1588
1592 1589 def updatecaches(tr=None, full=False):
1593 1590 """Warm repo caches."""
1594 1591
1595 1592 def invalidatecaches():
1596 1593 """Invalidate cached data due to the repository mutating."""
1597 1594
1598 1595 def invalidatevolatilesets():
1599 1596 pass
1600 1597
1601 1598 def invalidatedirstate():
1602 1599 """Invalidate the dirstate."""
1603 1600
1604 1601 def invalidate(clearfilecache=False):
1605 1602 pass
1606 1603
1607 1604 def invalidateall():
1608 1605 pass
1609 1606
1610 1607 def lock(wait=True):
1611 1608 """Lock the repository store and return a lock instance."""
1612 1609
1613 1610 def wlock(wait=True):
1614 1611 """Lock the non-store parts of the repository."""
1615 1612
1616 1613 def currentwlock():
1617 1614 """Return the wlock if it's held or None."""
1618 1615
1619 1616 def checkcommitpatterns(wctx, vdirs, match, status, fail):
1620 1617 pass
1621 1618
1622 1619 def commit(text='', user=None, date=None, match=None, force=False,
1623 1620 editor=False, extra=None):
1624 1621 """Add a new revision to the repository."""
1625 1622
1626 1623 def commitctx(ctx, error=False):
1627 1624 """Commit a commitctx instance to the repository."""
1628 1625
1629 1626 def destroying():
1630 1627 """Inform the repository that nodes are about to be destroyed."""
1631 1628
1632 1629 def destroyed():
1633 1630 """Inform the repository that nodes have been destroyed."""
1634 1631
1635 1632 def status(node1='.', node2=None, match=None, ignored=False,
1636 1633 clean=False, unknown=False, listsubrepos=False):
1637 1634 """Convenience method to call repo[x].status()."""
1638 1635
1639 1636 def addpostdsstatus(ps):
1640 1637 pass
1641 1638
1642 1639 def postdsstatus():
1643 1640 pass
1644 1641
1645 1642 def clearpostdsstatus():
1646 1643 pass
1647 1644
1648 1645 def heads(start=None):
1649 1646 """Obtain list of nodes that are DAG heads."""
1650 1647
1651 1648 def branchheads(branch=None, start=None, closed=False):
1652 1649 pass
1653 1650
1654 1651 def branches(nodes):
1655 1652 pass
1656 1653
1657 1654 def between(pairs):
1658 1655 pass
1659 1656
1660 1657 def checkpush(pushop):
1661 1658 pass
1662 1659
1663 1660 prepushoutgoinghooks = interfaceutil.Attribute(
1664 1661 """util.hooks instance.""")
1665 1662
1666 1663 def pushkey(namespace, key, old, new):
1667 1664 pass
1668 1665
1669 1666 def listkeys(namespace):
1670 1667 pass
1671 1668
1672 1669 def debugwireargs(one, two, three=None, four=None, five=None):
1673 1670 pass
1674 1671
1675 1672 def savecommitmessage(text):
1676 1673 pass
1677 1674
1678 1675 class completelocalrepository(ilocalrepositorymain,
1679 1676 ilocalrepositoryfilestorage):
1680 1677 """Complete interface for a local repository."""
@@ -1,1072 +1,1057 b''
1 1 # storage.py - Testing of storage primitives.
2 2 #
3 3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import unittest
11 11
12 12 from ..node import (
13 13 hex,
14 14 nullid,
15 15 nullrev,
16 16 )
17 17 from .. import (
18 18 error,
19 19 mdiff,
20 20 revlog,
21 21 )
22 22
23 23 class basetestcase(unittest.TestCase):
24 24 if not getattr(unittest.TestCase, r'assertRaisesRegex', False):
25 25 assertRaisesRegex = (# camelcase-required
26 26 unittest.TestCase.assertRaisesRegexp)
27 27
28 28 class ifileindextests(basetestcase):
29 29 """Generic tests for the ifileindex interface.
30 30
31 31 All file storage backends for index data should conform to the tests in this
32 32 class.
33 33
34 34 Use ``makeifileindextests()`` to create an instance of this type.
35 35 """
36 36 def testempty(self):
37 37 f = self._makefilefn()
38 38 self.assertEqual(len(f), 0, 'new file store has 0 length by default')
39 39 self.assertEqual(list(f), [], 'iter yields nothing by default')
40 40
41 41 gen = iter(f)
42 42 with self.assertRaises(StopIteration):
43 43 next(gen)
44 44
45 45 # revs() should evaluate to an empty list.
46 46 self.assertEqual(list(f.revs()), [])
47 47
48 48 revs = iter(f.revs())
49 49 with self.assertRaises(StopIteration):
50 50 next(revs)
51 51
52 52 self.assertEqual(list(f.revs(start=20)), [])
53 53
54 54 # parents() and parentrevs() work with nullid/nullrev.
55 55 self.assertEqual(f.parents(nullid), (nullid, nullid))
56 56 self.assertEqual(f.parentrevs(nullrev), (nullrev, nullrev))
57 57
58 58 with self.assertRaises(error.LookupError):
59 59 f.parents(b'\x01' * 20)
60 60
61 61 for i in range(-5, 5):
62 62 if i == nullrev:
63 63 continue
64 64
65 65 with self.assertRaises(IndexError):
66 66 f.parentrevs(i)
67 67
68 68 # nullid/nullrev lookup always works.
69 69 self.assertEqual(f.rev(nullid), nullrev)
70 70 self.assertEqual(f.node(nullrev), nullid)
71 71
72 72 with self.assertRaises(error.LookupError):
73 73 f.rev(b'\x01' * 20)
74 74
75 75 for i in range(-5, 5):
76 76 if i == nullrev:
77 77 continue
78 78
79 79 with self.assertRaises(IndexError):
80 80 f.node(i)
81 81
82 82 self.assertEqual(f.lookup(nullid), nullid)
83 83 self.assertEqual(f.lookup(nullrev), nullid)
84 84 self.assertEqual(f.lookup(hex(nullid)), nullid)
85 85
86 86 # String converted to integer doesn't work for nullrev.
87 87 with self.assertRaises(error.LookupError):
88 88 f.lookup(b'%d' % nullrev)
89 89
90 90 self.assertEqual(f.linkrev(nullrev), nullrev)
91 91
92 92 for i in range(-5, 5):
93 93 if i == nullrev:
94 94 continue
95 95
96 96 with self.assertRaises(IndexError):
97 97 f.linkrev(i)
98 98
99 99 self.assertFalse(f.iscensored(nullrev))
100 100
101 101 for i in range(-5, 5):
102 102 if i == nullrev:
103 103 continue
104 104
105 105 with self.assertRaises(IndexError):
106 106 f.iscensored(i)
107 107
108 108 self.assertEqual(list(f.commonancestorsheads(nullid, nullid)), [])
109 109
110 110 with self.assertRaises(ValueError):
111 111 self.assertEqual(list(f.descendants([])), [])
112 112
113 113 self.assertEqual(list(f.descendants([nullrev])), [])
114 114
115 115 self.assertEqual(f.heads(), [nullid])
116 116 self.assertEqual(f.heads(nullid), [nullid])
117 117 self.assertEqual(f.heads(None, [nullid]), [nullid])
118 118 self.assertEqual(f.heads(nullid, [nullid]), [nullid])
119 119
120 120 self.assertEqual(f.children(nullid), [])
121 121
122 122 with self.assertRaises(error.LookupError):
123 123 f.children(b'\x01' * 20)
124 124
125 self.assertEqual(f.deltaparent(nullrev), nullrev)
126
127 for i in range(-5, 5):
128 if i == nullrev:
129 continue
130
131 with self.assertRaises(IndexError):
132 f.deltaparent(i)
133
134 125 def testsinglerevision(self):
135 126 f = self._makefilefn()
136 127 with self._maketransactionfn() as tr:
137 128 node = f.add(b'initial', None, tr, 0, nullid, nullid)
138 129
139 130 self.assertEqual(len(f), 1)
140 131 self.assertEqual(list(f), [0])
141 132
142 133 gen = iter(f)
143 134 self.assertEqual(next(gen), 0)
144 135
145 136 with self.assertRaises(StopIteration):
146 137 next(gen)
147 138
148 139 self.assertEqual(list(f.revs()), [0])
149 140 self.assertEqual(list(f.revs(start=1)), [])
150 141 self.assertEqual(list(f.revs(start=0)), [0])
151 142 self.assertEqual(list(f.revs(stop=0)), [0])
152 143 self.assertEqual(list(f.revs(stop=1)), [0])
153 144 self.assertEqual(list(f.revs(1, 1)), [])
154 145 # TODO buggy
155 146 self.assertEqual(list(f.revs(1, 0)), [1, 0])
156 147 self.assertEqual(list(f.revs(2, 0)), [2, 1, 0])
157 148
158 149 self.assertEqual(f.parents(node), (nullid, nullid))
159 150 self.assertEqual(f.parentrevs(0), (nullrev, nullrev))
160 151
161 152 with self.assertRaises(error.LookupError):
162 153 f.parents(b'\x01' * 20)
163 154
164 155 with self.assertRaises(IndexError):
165 156 f.parentrevs(1)
166 157
167 158 self.assertEqual(f.rev(node), 0)
168 159
169 160 with self.assertRaises(error.LookupError):
170 161 f.rev(b'\x01' * 20)
171 162
172 163 self.assertEqual(f.node(0), node)
173 164
174 165 with self.assertRaises(IndexError):
175 166 f.node(1)
176 167
177 168 self.assertEqual(f.lookup(node), node)
178 169 self.assertEqual(f.lookup(0), node)
179 170 self.assertEqual(f.lookup(b'0'), node)
180 171 self.assertEqual(f.lookup(hex(node)), node)
181 172
182 173 self.assertEqual(f.linkrev(0), 0)
183 174
184 175 with self.assertRaises(IndexError):
185 176 f.linkrev(1)
186 177
187 178 self.assertFalse(f.iscensored(0))
188 179
189 180 with self.assertRaises(IndexError):
190 181 f.iscensored(1)
191 182
192 183 self.assertEqual(list(f.descendants([0])), [])
193 184
194 185 self.assertEqual(f.heads(), [node])
195 186 self.assertEqual(f.heads(node), [node])
196 187 self.assertEqual(f.heads(stop=[node]), [node])
197 188
198 189 with self.assertRaises(error.LookupError):
199 190 f.heads(stop=[b'\x01' * 20])
200 191
201 192 self.assertEqual(f.children(node), [])
202 193
203 self.assertEqual(f.deltaparent(0), nullrev)
204
205 194 def testmultiplerevisions(self):
206 195 fulltext0 = b'x' * 1024
207 196 fulltext1 = fulltext0 + b'y'
208 197 fulltext2 = b'y' + fulltext0 + b'z'
209 198
210 199 f = self._makefilefn()
211 200 with self._maketransactionfn() as tr:
212 201 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
213 202 node1 = f.add(fulltext1, None, tr, 1, node0, nullid)
214 203 node2 = f.add(fulltext2, None, tr, 3, node1, nullid)
215 204
216 205 self.assertEqual(len(f), 3)
217 206 self.assertEqual(list(f), [0, 1, 2])
218 207
219 208 gen = iter(f)
220 209 self.assertEqual(next(gen), 0)
221 210 self.assertEqual(next(gen), 1)
222 211 self.assertEqual(next(gen), 2)
223 212
224 213 with self.assertRaises(StopIteration):
225 214 next(gen)
226 215
227 216 self.assertEqual(list(f.revs()), [0, 1, 2])
228 217 self.assertEqual(list(f.revs(0)), [0, 1, 2])
229 218 self.assertEqual(list(f.revs(1)), [1, 2])
230 219 self.assertEqual(list(f.revs(2)), [2])
231 220 self.assertEqual(list(f.revs(3)), [])
232 221 self.assertEqual(list(f.revs(stop=1)), [0, 1])
233 222 self.assertEqual(list(f.revs(stop=2)), [0, 1, 2])
234 223 self.assertEqual(list(f.revs(stop=3)), [0, 1, 2])
235 224 self.assertEqual(list(f.revs(2, 0)), [2, 1, 0])
236 225 self.assertEqual(list(f.revs(2, 1)), [2, 1])
237 226 # TODO this is wrong
238 227 self.assertEqual(list(f.revs(3, 2)), [3, 2])
239 228
240 229 self.assertEqual(f.parents(node0), (nullid, nullid))
241 230 self.assertEqual(f.parents(node1), (node0, nullid))
242 231 self.assertEqual(f.parents(node2), (node1, nullid))
243 232
244 233 self.assertEqual(f.parentrevs(0), (nullrev, nullrev))
245 234 self.assertEqual(f.parentrevs(1), (0, nullrev))
246 235 self.assertEqual(f.parentrevs(2), (1, nullrev))
247 236
248 237 self.assertEqual(f.rev(node0), 0)
249 238 self.assertEqual(f.rev(node1), 1)
250 239 self.assertEqual(f.rev(node2), 2)
251 240
252 241 with self.assertRaises(error.LookupError):
253 242 f.rev(b'\x01' * 20)
254 243
255 244 self.assertEqual(f.node(0), node0)
256 245 self.assertEqual(f.node(1), node1)
257 246 self.assertEqual(f.node(2), node2)
258 247
259 248 with self.assertRaises(IndexError):
260 249 f.node(3)
261 250
262 251 self.assertEqual(f.lookup(node0), node0)
263 252 self.assertEqual(f.lookup(0), node0)
264 253 self.assertEqual(f.lookup(b'0'), node0)
265 254 self.assertEqual(f.lookup(hex(node0)), node0)
266 255
267 256 self.assertEqual(f.lookup(node1), node1)
268 257 self.assertEqual(f.lookup(1), node1)
269 258 self.assertEqual(f.lookup(b'1'), node1)
270 259 self.assertEqual(f.lookup(hex(node1)), node1)
271 260
272 261 self.assertEqual(f.linkrev(0), 0)
273 262 self.assertEqual(f.linkrev(1), 1)
274 263 self.assertEqual(f.linkrev(2), 3)
275 264
276 265 with self.assertRaises(IndexError):
277 266 f.linkrev(3)
278 267
279 268 self.assertFalse(f.iscensored(0))
280 269 self.assertFalse(f.iscensored(1))
281 270 self.assertFalse(f.iscensored(2))
282 271
283 272 with self.assertRaises(IndexError):
284 273 f.iscensored(3)
285 274
286 275 self.assertEqual(f.commonancestorsheads(node1, nullid), [])
287 276 self.assertEqual(f.commonancestorsheads(node1, node0), [node0])
288 277 self.assertEqual(f.commonancestorsheads(node1, node1), [node1])
289 278 self.assertEqual(f.commonancestorsheads(node0, node1), [node0])
290 279 self.assertEqual(f.commonancestorsheads(node1, node2), [node1])
291 280 self.assertEqual(f.commonancestorsheads(node2, node1), [node1])
292 281
293 282 self.assertEqual(list(f.descendants([0])), [1, 2])
294 283 self.assertEqual(list(f.descendants([1])), [2])
295 284 self.assertEqual(list(f.descendants([0, 1])), [1, 2])
296 285
297 286 self.assertEqual(f.heads(), [node2])
298 287 self.assertEqual(f.heads(node0), [node2])
299 288 self.assertEqual(f.heads(node1), [node2])
300 289 self.assertEqual(f.heads(node2), [node2])
301 290
302 291 # TODO this behavior seems wonky. Is it correct? If so, the
303 292 # docstring for heads() should be updated to reflect desired
304 293 # behavior.
305 294 self.assertEqual(f.heads(stop=[node1]), [node1, node2])
306 295 self.assertEqual(f.heads(stop=[node0]), [node0, node2])
307 296 self.assertEqual(f.heads(stop=[node1, node2]), [node1, node2])
308 297
309 298 with self.assertRaises(error.LookupError):
310 299 f.heads(stop=[b'\x01' * 20])
311 300
312 301 self.assertEqual(f.children(node0), [node1])
313 302 self.assertEqual(f.children(node1), [node2])
314 303 self.assertEqual(f.children(node2), [])
315 304
316 self.assertEqual(f.deltaparent(0), nullrev)
317 self.assertEqual(f.deltaparent(1), 0)
318 self.assertEqual(f.deltaparent(2), 1)
319
320 305 def testmultipleheads(self):
321 306 f = self._makefilefn()
322 307
323 308 with self._maketransactionfn() as tr:
324 309 node0 = f.add(b'0', None, tr, 0, nullid, nullid)
325 310 node1 = f.add(b'1', None, tr, 1, node0, nullid)
326 311 node2 = f.add(b'2', None, tr, 2, node1, nullid)
327 312 node3 = f.add(b'3', None, tr, 3, node0, nullid)
328 313 node4 = f.add(b'4', None, tr, 4, node3, nullid)
329 314 node5 = f.add(b'5', None, tr, 5, node0, nullid)
330 315
331 316 self.assertEqual(len(f), 6)
332 317
333 318 self.assertEqual(list(f.descendants([0])), [1, 2, 3, 4, 5])
334 319 self.assertEqual(list(f.descendants([1])), [2])
335 320 self.assertEqual(list(f.descendants([2])), [])
336 321 self.assertEqual(list(f.descendants([3])), [4])
337 322 self.assertEqual(list(f.descendants([0, 1])), [1, 2, 3, 4, 5])
338 323 self.assertEqual(list(f.descendants([1, 3])), [2, 4])
339 324
340 325 self.assertEqual(f.heads(), [node2, node4, node5])
341 326 self.assertEqual(f.heads(node0), [node2, node4, node5])
342 327 self.assertEqual(f.heads(node1), [node2])
343 328 self.assertEqual(f.heads(node2), [node2])
344 329 self.assertEqual(f.heads(node3), [node4])
345 330 self.assertEqual(f.heads(node4), [node4])
346 331 self.assertEqual(f.heads(node5), [node5])
347 332
348 333 # TODO this seems wrong.
349 334 self.assertEqual(f.heads(stop=[node0]), [node0, node2, node4, node5])
350 335 self.assertEqual(f.heads(stop=[node1]), [node1, node2, node4, node5])
351 336
352 337 self.assertEqual(f.children(node0), [node1, node3, node5])
353 338 self.assertEqual(f.children(node1), [node2])
354 339 self.assertEqual(f.children(node2), [])
355 340 self.assertEqual(f.children(node3), [node4])
356 341 self.assertEqual(f.children(node4), [])
357 342 self.assertEqual(f.children(node5), [])
358 343
359 344 class ifiledatatests(basetestcase):
360 345 """Generic tests for the ifiledata interface.
361 346
362 347 All file storage backends for data should conform to the tests in this
363 348 class.
364 349
365 350 Use ``makeifiledatatests()`` to create an instance of this type.
366 351 """
367 352 def testempty(self):
368 353 f = self._makefilefn()
369 354
370 355 self.assertEqual(f.storageinfo(), {})
371 356 self.assertEqual(f.storageinfo(revisionscount=True, trackedsize=True),
372 357 {'revisionscount': 0, 'trackedsize': 0})
373 358
374 359 self.assertEqual(f.size(nullrev), 0)
375 360
376 361 for i in range(-5, 5):
377 362 if i == nullrev:
378 363 continue
379 364
380 365 with self.assertRaises(IndexError):
381 366 f.size(i)
382 367
383 368 with self.assertRaises(error.StorageError):
384 369 f.checkhash(b'', nullid)
385 370
386 371 with self.assertRaises(error.LookupError):
387 372 f.checkhash(b'', b'\x01' * 20)
388 373
389 374 self.assertEqual(f.revision(nullid), b'')
390 375 self.assertEqual(f.revision(nullid, raw=True), b'')
391 376
392 377 with self.assertRaises(error.LookupError):
393 378 f.revision(b'\x01' * 20)
394 379
395 380 self.assertEqual(f.read(nullid), b'')
396 381
397 382 with self.assertRaises(error.LookupError):
398 383 f.read(b'\x01' * 20)
399 384
400 385 self.assertFalse(f.renamed(nullid))
401 386
402 387 with self.assertRaises(error.LookupError):
403 388 f.read(b'\x01' * 20)
404 389
405 390 self.assertTrue(f.cmp(nullid, b''))
406 391 self.assertTrue(f.cmp(nullid, b'foo'))
407 392
408 393 with self.assertRaises(error.LookupError):
409 394 f.cmp(b'\x01' * 20, b'irrelevant')
410 395
411 396 self.assertEqual(f.revdiff(nullrev, nullrev), b'')
412 397
413 398 with self.assertRaises(IndexError):
414 399 f.revdiff(0, nullrev)
415 400
416 401 with self.assertRaises(IndexError):
417 402 f.revdiff(nullrev, 0)
418 403
419 404 with self.assertRaises(IndexError):
420 405 f.revdiff(0, 0)
421 406
422 407 # Emitting empty list is an empty generator.
423 408 gen = f.emitrevisions([])
424 409 with self.assertRaises(StopIteration):
425 410 next(gen)
426 411
427 412 # Emitting null node yields nothing.
428 413 gen = f.emitrevisions([nullid])
429 414 with self.assertRaises(StopIteration):
430 415 next(gen)
431 416
432 417 # Requesting unknown node fails.
433 418 with self.assertRaises(error.LookupError):
434 419 list(f.emitrevisions([b'\x01' * 20]))
435 420
436 421 def testsinglerevision(self):
437 422 fulltext = b'initial'
438 423
439 424 f = self._makefilefn()
440 425 with self._maketransactionfn() as tr:
441 426 node = f.add(fulltext, None, tr, 0, nullid, nullid)
442 427
443 428 self.assertEqual(f.storageinfo(), {})
444 429 self.assertEqual(f.storageinfo(revisionscount=True, trackedsize=True),
445 430 {'revisionscount': 1, 'trackedsize': len(fulltext)})
446 431
447 432 self.assertEqual(f.size(0), len(fulltext))
448 433
449 434 with self.assertRaises(IndexError):
450 435 f.size(1)
451 436
452 437 f.checkhash(fulltext, node)
453 438 f.checkhash(fulltext, node, nullid, nullid)
454 439
455 440 with self.assertRaises(error.StorageError):
456 441 f.checkhash(fulltext + b'extra', node)
457 442
458 443 with self.assertRaises(error.StorageError):
459 444 f.checkhash(fulltext, node, b'\x01' * 20, nullid)
460 445
461 446 with self.assertRaises(error.StorageError):
462 447 f.checkhash(fulltext, node, nullid, b'\x01' * 20)
463 448
464 449 self.assertEqual(f.revision(node), fulltext)
465 450 self.assertEqual(f.revision(node, raw=True), fulltext)
466 451
467 452 self.assertEqual(f.read(node), fulltext)
468 453
469 454 self.assertFalse(f.renamed(node))
470 455
471 456 self.assertFalse(f.cmp(node, fulltext))
472 457 self.assertTrue(f.cmp(node, fulltext + b'extra'))
473 458
474 459 self.assertEqual(f.revdiff(0, 0), b'')
475 460 self.assertEqual(f.revdiff(nullrev, 0),
476 461 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07%s' %
477 462 fulltext)
478 463
479 464 self.assertEqual(f.revdiff(0, nullrev),
480 465 b'\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00')
481 466
482 467 # Emitting a single revision works.
483 468 gen = f.emitrevisions([node])
484 469 rev = next(gen)
485 470
486 471 self.assertEqual(rev.node, node)
487 472 self.assertEqual(rev.p1node, nullid)
488 473 self.assertEqual(rev.p2node, nullid)
489 474 self.assertIsNone(rev.linknode)
490 475 self.assertEqual(rev.basenode, nullid)
491 476 self.assertIsNone(rev.baserevisionsize)
492 477 self.assertIsNone(rev.revision)
493 478 self.assertIsNone(rev.delta)
494 479
495 480 with self.assertRaises(StopIteration):
496 481 next(gen)
497 482
498 483 # Requesting revision data works.
499 484 gen = f.emitrevisions([node], revisiondata=True)
500 485 rev = next(gen)
501 486
502 487 self.assertEqual(rev.node, node)
503 488 self.assertEqual(rev.p1node, nullid)
504 489 self.assertEqual(rev.p2node, nullid)
505 490 self.assertIsNone(rev.linknode)
506 491 self.assertEqual(rev.basenode, nullid)
507 492 self.assertIsNone(rev.baserevisionsize)
508 493 self.assertEqual(rev.revision, fulltext)
509 494 self.assertIsNone(rev.delta)
510 495
511 496 with self.assertRaises(StopIteration):
512 497 next(gen)
513 498
514 499 # Emitting an unknown node after a known revision results in error.
515 500 with self.assertRaises(error.LookupError):
516 501 list(f.emitrevisions([node, b'\x01' * 20]))
517 502
518 503 def testmultiplerevisions(self):
519 504 fulltext0 = b'x' * 1024
520 505 fulltext1 = fulltext0 + b'y'
521 506 fulltext2 = b'y' + fulltext0 + b'z'
522 507
523 508 f = self._makefilefn()
524 509 with self._maketransactionfn() as tr:
525 510 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
526 511 node1 = f.add(fulltext1, None, tr, 1, node0, nullid)
527 512 node2 = f.add(fulltext2, None, tr, 3, node1, nullid)
528 513
529 514 self.assertEqual(f.storageinfo(), {})
530 515 self.assertEqual(
531 516 f.storageinfo(revisionscount=True, trackedsize=True),
532 517 {
533 518 'revisionscount': 3,
534 519 'trackedsize': len(fulltext0) + len(fulltext1) + len(fulltext2),
535 520 })
536 521
537 522 self.assertEqual(f.size(0), len(fulltext0))
538 523 self.assertEqual(f.size(1), len(fulltext1))
539 524 self.assertEqual(f.size(2), len(fulltext2))
540 525
541 526 with self.assertRaises(IndexError):
542 527 f.size(3)
543 528
544 529 f.checkhash(fulltext0, node0)
545 530 f.checkhash(fulltext1, node1)
546 531 f.checkhash(fulltext1, node1, node0, nullid)
547 532 f.checkhash(fulltext2, node2, node1, nullid)
548 533
549 534 with self.assertRaises(error.StorageError):
550 535 f.checkhash(fulltext1, b'\x01' * 20)
551 536
552 537 with self.assertRaises(error.StorageError):
553 538 f.checkhash(fulltext1 + b'extra', node1, node0, nullid)
554 539
555 540 with self.assertRaises(error.StorageError):
556 541 f.checkhash(fulltext1, node1, node0, node0)
557 542
558 543 self.assertEqual(f.revision(node0), fulltext0)
559 544 self.assertEqual(f.revision(node0, raw=True), fulltext0)
560 545 self.assertEqual(f.revision(node1), fulltext1)
561 546 self.assertEqual(f.revision(node1, raw=True), fulltext1)
562 547 self.assertEqual(f.revision(node2), fulltext2)
563 548 self.assertEqual(f.revision(node2, raw=True), fulltext2)
564 549
565 550 with self.assertRaises(error.LookupError):
566 551 f.revision(b'\x01' * 20)
567 552
568 553 self.assertEqual(f.read(node0), fulltext0)
569 554 self.assertEqual(f.read(node1), fulltext1)
570 555 self.assertEqual(f.read(node2), fulltext2)
571 556
572 557 with self.assertRaises(error.LookupError):
573 558 f.read(b'\x01' * 20)
574 559
575 560 self.assertFalse(f.renamed(node0))
576 561 self.assertFalse(f.renamed(node1))
577 562 self.assertFalse(f.renamed(node2))
578 563
579 564 with self.assertRaises(error.LookupError):
580 565 f.renamed(b'\x01' * 20)
581 566
582 567 self.assertFalse(f.cmp(node0, fulltext0))
583 568 self.assertFalse(f.cmp(node1, fulltext1))
584 569 self.assertFalse(f.cmp(node2, fulltext2))
585 570
586 571 self.assertTrue(f.cmp(node1, fulltext0))
587 572 self.assertTrue(f.cmp(node2, fulltext1))
588 573
589 574 with self.assertRaises(error.LookupError):
590 575 f.cmp(b'\x01' * 20, b'irrelevant')
591 576
592 577 self.assertEqual(f.revdiff(0, 1),
593 578 b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' +
594 579 fulltext1)
595 580
596 581 self.assertEqual(f.revdiff(0, 2),
597 582 b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x02' +
598 583 fulltext2)
599 584
600 585 # Nodes should be emitted in order.
601 586 gen = f.emitrevisions([node0, node1, node2], revisiondata=True)
602 587
603 588 rev = next(gen)
604 589
605 590 self.assertEqual(rev.node, node0)
606 591 self.assertEqual(rev.p1node, nullid)
607 592 self.assertEqual(rev.p2node, nullid)
608 593 self.assertIsNone(rev.linknode)
609 594 self.assertEqual(rev.basenode, nullid)
610 595 self.assertIsNone(rev.baserevisionsize)
611 596 self.assertEqual(rev.revision, fulltext0)
612 597 self.assertIsNone(rev.delta)
613 598
614 599 rev = next(gen)
615 600
616 601 self.assertEqual(rev.node, node1)
617 602 self.assertEqual(rev.p1node, node0)
618 603 self.assertEqual(rev.p2node, nullid)
619 604 self.assertIsNone(rev.linknode)
620 605 self.assertEqual(rev.basenode, node0)
621 606 self.assertIsNone(rev.baserevisionsize)
622 607 self.assertIsNone(rev.revision)
623 608 self.assertEqual(rev.delta,
624 609 b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' +
625 610 fulltext1)
626 611
627 612 rev = next(gen)
628 613
629 614 self.assertEqual(rev.node, node2)
630 615 self.assertEqual(rev.p1node, node1)
631 616 self.assertEqual(rev.p2node, nullid)
632 617 self.assertIsNone(rev.linknode)
633 618 self.assertEqual(rev.basenode, node1)
634 619 self.assertIsNone(rev.baserevisionsize)
635 620 self.assertIsNone(rev.revision)
636 621 self.assertEqual(rev.delta,
637 622 b'\x00\x00\x00\x00\x00\x00\x04\x01\x00\x00\x04\x02' +
638 623 fulltext2)
639 624
640 625 with self.assertRaises(StopIteration):
641 626 next(gen)
642 627
643 628 # Request not in DAG order is reordered to be in DAG order.
644 629 gen = f.emitrevisions([node2, node1, node0], revisiondata=True)
645 630
646 631 rev = next(gen)
647 632
648 633 self.assertEqual(rev.node, node0)
649 634 self.assertEqual(rev.p1node, nullid)
650 635 self.assertEqual(rev.p2node, nullid)
651 636 self.assertIsNone(rev.linknode)
652 637 self.assertEqual(rev.basenode, nullid)
653 638 self.assertIsNone(rev.baserevisionsize)
654 639 self.assertEqual(rev.revision, fulltext0)
655 640 self.assertIsNone(rev.delta)
656 641
657 642 rev = next(gen)
658 643
659 644 self.assertEqual(rev.node, node1)
660 645 self.assertEqual(rev.p1node, node0)
661 646 self.assertEqual(rev.p2node, nullid)
662 647 self.assertIsNone(rev.linknode)
663 648 self.assertEqual(rev.basenode, node0)
664 649 self.assertIsNone(rev.baserevisionsize)
665 650 self.assertIsNone(rev.revision)
666 651 self.assertEqual(rev.delta,
667 652 b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' +
668 653 fulltext1)
669 654
670 655 rev = next(gen)
671 656
672 657 self.assertEqual(rev.node, node2)
673 658 self.assertEqual(rev.p1node, node1)
674 659 self.assertEqual(rev.p2node, nullid)
675 660 self.assertIsNone(rev.linknode)
676 661 self.assertEqual(rev.basenode, node1)
677 662 self.assertIsNone(rev.baserevisionsize)
678 663 self.assertIsNone(rev.revision)
679 664 self.assertEqual(rev.delta,
680 665 b'\x00\x00\x00\x00\x00\x00\x04\x01\x00\x00\x04\x02' +
681 666 fulltext2)
682 667
683 668 with self.assertRaises(StopIteration):
684 669 next(gen)
685 670
686 671 # Unrecognized nodesorder value raises ProgrammingError.
687 672 with self.assertRaises(error.ProgrammingError):
688 673 list(f.emitrevisions([], nodesorder='bad'))
689 674
690 675 # nodesorder=storage is recognized. But we can't test it thoroughly
691 676 # because behavior is storage-dependent.
692 677 res = list(f.emitrevisions([node2, node1, node0],
693 678 nodesorder='storage'))
694 679 self.assertEqual(len(res), 3)
695 680 self.assertEqual({o.node for o in res}, {node0, node1, node2})
696 681
697 682 # nodesorder=nodes forces the order.
698 683 gen = f.emitrevisions([node2, node0], nodesorder='nodes',
699 684 revisiondata=True)
700 685
701 686 rev = next(gen)
702 687 self.assertEqual(rev.node, node2)
703 688 self.assertEqual(rev.p1node, node1)
704 689 self.assertEqual(rev.p2node, nullid)
705 690 self.assertEqual(rev.basenode, nullid)
706 691 self.assertIsNone(rev.baserevisionsize)
707 692 self.assertEqual(rev.revision, fulltext2)
708 693 self.assertIsNone(rev.delta)
709 694
710 695 rev = next(gen)
711 696 self.assertEqual(rev.node, node0)
712 697 self.assertEqual(rev.p1node, nullid)
713 698 self.assertEqual(rev.p2node, nullid)
714 699 # Delta behavior is storage dependent, so we can't easily test it.
715 700
716 701 with self.assertRaises(StopIteration):
717 702 next(gen)
718 703
719 704 # assumehaveparentrevisions=False (the default) won't send a delta for
720 705 # the first revision.
721 706 gen = f.emitrevisions({node2, node1}, revisiondata=True)
722 707
723 708 rev = next(gen)
724 709 self.assertEqual(rev.node, node1)
725 710 self.assertEqual(rev.p1node, node0)
726 711 self.assertEqual(rev.p2node, nullid)
727 712 self.assertEqual(rev.basenode, nullid)
728 713 self.assertIsNone(rev.baserevisionsize)
729 714 self.assertEqual(rev.revision, fulltext1)
730 715 self.assertIsNone(rev.delta)
731 716
732 717 rev = next(gen)
733 718 self.assertEqual(rev.node, node2)
734 719 self.assertEqual(rev.p1node, node1)
735 720 self.assertEqual(rev.p2node, nullid)
736 721 self.assertEqual(rev.basenode, node1)
737 722 self.assertIsNone(rev.baserevisionsize)
738 723 self.assertIsNone(rev.revision)
739 724 self.assertEqual(rev.delta,
740 725 b'\x00\x00\x00\x00\x00\x00\x04\x01\x00\x00\x04\x02' +
741 726 fulltext2)
742 727
743 728 with self.assertRaises(StopIteration):
744 729 next(gen)
745 730
746 731 # assumehaveparentrevisions=True allows delta against initial revision.
747 732 gen = f.emitrevisions([node2, node1],
748 733 revisiondata=True, assumehaveparentrevisions=True)
749 734
750 735 rev = next(gen)
751 736 self.assertEqual(rev.node, node1)
752 737 self.assertEqual(rev.p1node, node0)
753 738 self.assertEqual(rev.p2node, nullid)
754 739 self.assertEqual(rev.basenode, node0)
755 740 self.assertIsNone(rev.baserevisionsize)
756 741 self.assertIsNone(rev.revision)
757 742 self.assertEqual(rev.delta,
758 743 b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' +
759 744 fulltext1)
760 745
761 746 # forceprevious=True forces a delta against the previous revision.
762 747 # Special case for initial revision.
763 748 gen = f.emitrevisions([node0], revisiondata=True, deltaprevious=True)
764 749
765 750 rev = next(gen)
766 751 self.assertEqual(rev.node, node0)
767 752 self.assertEqual(rev.p1node, nullid)
768 753 self.assertEqual(rev.p2node, nullid)
769 754 self.assertEqual(rev.basenode, nullid)
770 755 self.assertIsNone(rev.baserevisionsize)
771 756 self.assertIsNone(rev.revision)
772 757 self.assertEqual(rev.delta,
773 758 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00' +
774 759 fulltext0)
775 760
776 761 with self.assertRaises(StopIteration):
777 762 next(gen)
778 763
779 764 gen = f.emitrevisions([node0, node2], revisiondata=True,
780 765 deltaprevious=True)
781 766
782 767 rev = next(gen)
783 768 self.assertEqual(rev.node, node0)
784 769 self.assertEqual(rev.p1node, nullid)
785 770 self.assertEqual(rev.p2node, nullid)
786 771 self.assertEqual(rev.basenode, nullid)
787 772 self.assertIsNone(rev.baserevisionsize)
788 773 self.assertIsNone(rev.revision)
789 774 self.assertEqual(rev.delta,
790 775 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00' +
791 776 fulltext0)
792 777
793 778 rev = next(gen)
794 779 self.assertEqual(rev.node, node2)
795 780 self.assertEqual(rev.p1node, node1)
796 781 self.assertEqual(rev.p2node, nullid)
797 782 self.assertEqual(rev.basenode, node0)
798 783
799 784 with self.assertRaises(StopIteration):
800 785 next(gen)
801 786
802 787 def testrenamed(self):
803 788 fulltext0 = b'foo'
804 789 fulltext1 = b'bar'
805 790 fulltext2 = b'baz'
806 791
807 792 meta1 = {
808 793 b'copy': b'source0',
809 794 b'copyrev': b'a' * 40,
810 795 }
811 796
812 797 meta2 = {
813 798 b'copy': b'source1',
814 799 b'copyrev': b'b' * 40,
815 800 }
816 801
817 802 stored1 = b''.join([
818 803 b'\x01\ncopy: source0\n',
819 804 b'copyrev: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n\x01\n',
820 805 fulltext1,
821 806 ])
822 807
823 808 stored2 = b''.join([
824 809 b'\x01\ncopy: source1\n',
825 810 b'copyrev: bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n\x01\n',
826 811 fulltext2,
827 812 ])
828 813
829 814 f = self._makefilefn()
830 815 with self._maketransactionfn() as tr:
831 816 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
832 817 node1 = f.add(fulltext1, meta1, tr, 1, node0, nullid)
833 818 node2 = f.add(fulltext2, meta2, tr, 2, nullid, nullid)
834 819
835 820 # Metadata header isn't recognized when parent isn't nullid.
836 821 self.assertEqual(f.size(1), len(stored1))
837 822 self.assertEqual(f.size(2), len(fulltext2))
838 823
839 824 self.assertEqual(f.revision(node1), stored1)
840 825 self.assertEqual(f.revision(node1, raw=True), stored1)
841 826 self.assertEqual(f.revision(node2), stored2)
842 827 self.assertEqual(f.revision(node2, raw=True), stored2)
843 828
844 829 self.assertEqual(f.read(node1), fulltext1)
845 830 self.assertEqual(f.read(node2), fulltext2)
846 831
847 832 # Returns False when first parent is set.
848 833 self.assertFalse(f.renamed(node1))
849 834 self.assertEqual(f.renamed(node2), (b'source1', b'\xbb' * 20))
850 835
851 836 self.assertTrue(f.cmp(node1, fulltext1))
852 837 self.assertTrue(f.cmp(node1, stored1))
853 838 self.assertFalse(f.cmp(node2, fulltext2))
854 839 self.assertTrue(f.cmp(node2, stored2))
855 840
856 841 def testmetadataprefix(self):
857 842 # Content with metadata prefix has extra prefix inserted in storage.
858 843 fulltext0 = b'\x01\nfoo'
859 844 stored0 = b'\x01\n\x01\n\x01\nfoo'
860 845
861 846 fulltext1 = b'\x01\nbar'
862 847 meta1 = {
863 848 b'copy': b'source0',
864 849 b'copyrev': b'b' * 40,
865 850 }
866 851 stored1 = b''.join([
867 852 b'\x01\ncopy: source0\n',
868 853 b'copyrev: %s\n' % (b'b' * 40),
869 854 b'\x01\n\x01\nbar',
870 855 ])
871 856
872 857 f = self._makefilefn()
873 858 with self._maketransactionfn() as tr:
874 859 node0 = f.add(fulltext0, {}, tr, 0, nullid, nullid)
875 860 node1 = f.add(fulltext1, meta1, tr, 1, nullid, nullid)
876 861
877 862 # TODO this is buggy.
878 863 self.assertEqual(f.size(0), len(fulltext0) + 4)
879 864
880 865 self.assertEqual(f.size(1), len(fulltext1))
881 866
882 867 self.assertEqual(f.revision(node0), stored0)
883 868 self.assertEqual(f.revision(node0, raw=True), stored0)
884 869
885 870 self.assertEqual(f.revision(node1), stored1)
886 871 self.assertEqual(f.revision(node1, raw=True), stored1)
887 872
888 873 self.assertEqual(f.read(node0), fulltext0)
889 874 self.assertEqual(f.read(node1), fulltext1)
890 875
891 876 self.assertFalse(f.cmp(node0, fulltext0))
892 877 self.assertTrue(f.cmp(node0, stored0))
893 878
894 879 self.assertFalse(f.cmp(node1, fulltext1))
895 880 self.assertTrue(f.cmp(node1, stored0))
896 881
897 882 def testcensored(self):
898 883 f = self._makefilefn()
899 884
900 885 stored1 = revlog.packmeta({
901 886 b'censored': b'tombstone',
902 887 }, b'')
903 888
904 889 # TODO tests are incomplete because we need the node to be
905 890 # different due to presence of censor metadata. But we can't
906 891 # do this with addrevision().
907 892 with self._maketransactionfn() as tr:
908 893 node0 = f.add(b'foo', None, tr, 0, nullid, nullid)
909 894 f.addrevision(stored1, tr, 1, node0, nullid,
910 895 flags=revlog.REVIDX_ISCENSORED)
911 896
912 897 self.assertTrue(f.iscensored(1))
913 898
914 899 self.assertEqual(f.revision(1), stored1)
915 900 self.assertEqual(f.revision(1, raw=True), stored1)
916 901
917 902 self.assertEqual(f.read(1), b'')
918 903
class ifilemutationtests(basetestcase):
    """Generic tests for the ifilemutation interface.

    All file storage backends that support writing should conform to this
    interface.

    Use ``makeifilemutationtests()`` to create an instance of this type.
    """
    def testaddnoop(self):
        # Identical fulltexts must deduplicate to a single revision.
        f = self._makefilefn()
        with self._maketransactionfn() as tr:
            node0 = f.add(b'foo', None, tr, 0, nullid, nullid)
            node1 = f.add(b'foo', None, tr, 0, nullid, nullid)
            # Varying by linkrev shouldn't impact hash.
            node2 = f.add(b'foo', None, tr, 1, nullid, nullid)

        self.assertEqual(node1, node0)
        self.assertEqual(node2, node0)
        self.assertEqual(len(f), 1)

    def testaddrevisionbadnode(self):
        f = self._makefilefn()
        with self._maketransactionfn() as tr:
            # Adding a revision with bad node value fails.
            with self.assertRaises(error.StorageError):
                f.addrevision(b'foo', tr, 0, nullid, nullid, node=b'\x01' * 20)

    def testaddrevisionunknownflag(self):
        f = self._makefilefn()
        with self._maketransactionfn() as tr:
            # Pick the highest flag bit that is not a known revlog flag.
            for bit in range(15, 0, -1):
                if (1 << bit) & ~revlog.REVIDX_KNOWN_FLAGS:
                    flags = 1 << bit
                    break

            with self.assertRaises(error.StorageError):
                f.addrevision(b'foo', tr, 0, nullid, nullid, flags=flags)

    def testaddgroupsimple(self):
        f = self._makefilefn()

        callbackargs = []

        def cb(*args, **kwargs):
            # Record every addrevisioncb invocation for later inspection.
            callbackargs.append((args, kwargs))

        def linkmapper(node):
            return 0

        # An empty group of deltas is a no-op.
        with self._maketransactionfn() as tr:
            nodes = f.addgroup([], None, tr, addrevisioncb=cb)

        self.assertEqual(nodes, [])
        self.assertEqual(callbackargs, [])
        self.assertEqual(len(f), 0)

        fulltext0 = b'foo'
        delta0 = mdiff.trivialdiffheader(len(fulltext0)) + fulltext0

        # A delta carrying a node that doesn't hash correctly is rejected.
        deltas = [
            (b'\x01' * 20, nullid, nullid, nullid, nullid, delta0, 0),
        ]

        with self._maketransactionfn() as tr:
            with self.assertRaises(error.StorageError):
                f.addgroup(deltas, linkmapper, tr, addrevisioncb=cb)

            node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)

        # Same delta with the correct node applies cleanly to a fresh store.
        f = self._makefilefn()

        deltas = [
            (node0, nullid, nullid, nullid, nullid, delta0, 0),
        ]

        with self._maketransactionfn() as tr:
            nodes = f.addgroup(deltas, linkmapper, tr, addrevisioncb=cb)

        self.assertEqual(nodes, [
            b'\x49\xd8\xcb\xb1\x5c\xe2\x57\x92\x04\x47'
            b'\x00\x6b\x46\x97\x8b\x7a\xf9\x80\xa9\x79'])

        self.assertEqual(len(callbackargs), 1)
        self.assertEqual(callbackargs[0][0][1], nodes[0])

        self.assertEqual(list(f.revs()), [0])
        self.assertEqual(f.rev(nodes[0]), 0)
        self.assertEqual(f.node(0), nodes[0])

    def testaddgroupmultiple(self):
        f = self._makefilefn()

        fulltexts = [
            b'foo',
            b'bar',
            b'x' * 1024,
        ]

        # First record the fulltexts normally to learn their nodes.
        nodes = []
        with self._maketransactionfn() as tr:
            nodes = [f.add(text, None, tr, 0, nullid, nullid)
                     for text in fulltexts]

        # Then replay them as a delta group against a fresh store.
        f = self._makefilefn()
        deltas = []
        for node, fulltext in zip(nodes, fulltexts):
            delta = mdiff.trivialdiffheader(len(fulltext)) + fulltext
            deltas.append((node, nullid, nullid, nullid, nullid, delta, 0))

        with self._maketransactionfn() as tr:
            self.assertEqual(f.addgroup(deltas, lambda x: 0, tr), nodes)

        self.assertEqual(len(f), len(deltas))
        self.assertEqual(list(f.revs()), [0, 1, 2])
        self.assertEqual(f.rev(nodes[0]), 0)
        self.assertEqual(f.rev(nodes[1]), 1)
        self.assertEqual(f.rev(nodes[2]), 2)
        self.assertEqual(f.node(0), nodes[0])
        self.assertEqual(f.node(1), nodes[1])
        self.assertEqual(f.node(2), nodes[2])
1039 1024
def makeifileindextests(makefilefn, maketransactionfn):
    """Create a unittest.TestCase class suitable for testing file storage.

    ``makefilefn`` is a callable which receives the test case as an
    argument and returns an object implementing the ``ifilestorage``
    interface.

    ``maketransactionfn`` is a callable which receives the test case as an
    argument and returns a transaction object.

    Returns a type that is a ``unittest.TestCase`` that can be used for
    testing the object implementing the file storage interface. Simply
    assign the returned value to a module-level attribute and a test loader
    should find and run it automatically.
    """
    namespace = {
        r'_makefilefn': makefilefn,
        r'_maketransactionfn': maketransactionfn,
    }
    return type(r'ifileindextests', (ifileindextests,), namespace)
1059 1044
def makeifiledatatests(makefilefn, maketransactionfn):
    """Create a TestCase subclass exercising the ifiledata tests.

    See ``makeifileindextests()`` for the meaning of the arguments.
    """
    namespace = {
        r'_makefilefn': makefilefn,
        r'_maketransactionfn': maketransactionfn,
    }
    return type(r'ifiledatatests', (ifiledatatests,), namespace)
1066 1051
def makeifilemutationtests(makefilefn, maketransactionfn):
    """Create a TestCase subclass exercising the ifilemutation tests.

    See ``makeifileindextests()`` for the meaning of the arguments.
    """
    namespace = {
        r'_makefilefn': makefilefn,
        r'_maketransactionfn': maketransactionfn,
    }
    return type(r'ifilemutationtests', (ifilemutationtests,), namespace)
@@ -1,679 +1,673 b''
1 1 # simplestorerepo.py - Extension that swaps in alternate repository storage.
2 2 #
3 3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 # To use this with the test suite:
9 9 #
10 10 # $ HGREPOFEATURES="simplestore" ./run-tests.py \
11 11 # --extra-config-opt extensions.simplestore=`pwd`/simplestorerepo.py
12 12
13 13 from __future__ import absolute_import
14 14
15 15 import stat
16 16
17 17 from mercurial.i18n import _
18 18 from mercurial.node import (
19 19 bin,
20 20 hex,
21 21 nullid,
22 22 nullrev,
23 23 )
24 24 from mercurial.thirdparty import (
25 25 attr,
26 26 cbor,
27 27 )
28 28 from mercurial import (
29 29 ancestor,
30 30 bundlerepo,
31 31 error,
32 32 extensions,
33 33 localrepo,
34 34 mdiff,
35 35 pycompat,
36 36 repository,
37 37 revlog,
38 38 store,
39 39 verify,
40 40 )
41 41 from mercurial.utils import (
42 42 interfaceutil,
43 43 )
44 44
45 45 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
46 46 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
47 47 # be specifying the version(s) of Mercurial they are tested with, or
48 48 # leave the attribute unspecified.
49 49 testedwith = 'ships-with-hg-core'
50 50
51 51 REQUIREMENT = 'testonly-simplestore'
52 52
def validatenode(node):
    """Raise ValueError unless ``node`` looks like a 20-byte binary node.

    Integers are rejected explicitly first, because they have no len().
    """
    if isinstance(node, int):
        raise ValueError('expected node; got int')

    if len(node) != 20:
        raise ValueError('expected 20 byte node')
59 59
def validaterev(rev):
    """Raise ValueError unless ``rev`` is an integer revision number."""
    if not isinstance(rev, int):
        raise ValueError('expected int')
63 63
class simplestoreerror(error.StorageError):
    """Storage error raised by the simple store backend."""
66 66
@interfaceutil.implementer(repository.irevisiondelta)
@attr.s(slots=True, frozen=True)
class simplestorerevisiondelta(object):
    """Immutable description of a revision/delta emitted by the store.

    NOTE: attribute order is significant — ``attr.s`` derives the
    positional ``__init__`` signature from it.
    """
    node = attr.ib()
    p1node = attr.ib()
    p2node = attr.ib()
    basenode = attr.ib()
    linknode = attr.ib()
    flags = attr.ib()
    baserevisionsize = attr.ib()
    revision = attr.ib()
    delta = attr.ib()
79 79
@interfaceutil.implementer(repository.ifilestorage)
class filestorage(object):
    """Implements storage for a tracked path.

    Data is stored in the VFS in a directory corresponding to the tracked
    path.

    Index data is stored in an ``index`` file using CBOR.

    Fulltext data is stored in files having names of the node.
    """

    def __init__(self, svfs, path):
        self._svfs = svfs
        self._path = path

        self._storepath = b'/'.join([b'data', path])
        self._indexpath = b'/'.join([self._storepath, b'index'])

        indexdata = self._svfs.tryread(self._indexpath)
        if indexdata:
            indexdata = cbor.loads(indexdata)

        self._indexdata = indexdata or []
        self._indexbynode = {}
        self._indexbyrev = {}
        # node -> rev map so rev() is O(1) instead of a linear scan of
        # the index (which previously made _refreshindex() O(n^2)).
        self._revbynode = {}
        self._index = []
        self._refreshindex()

    def _refreshindex(self):
        """Rebuild all derived lookup structures from ``self._indexdata``."""
        self._indexbynode.clear()
        self._indexbyrev.clear()
        self._revbynode.clear()
        self._index = []

        for i, entry in enumerate(self._indexdata):
            self._indexbynode[entry[b'node']] = entry
            self._indexbyrev[i] = entry
            # setdefault preserves first-occurrence semantics of the old
            # linear scan should a node ever appear twice.
            self._revbynode.setdefault(entry[b'node'], i)

        # Synthesize entries for the null revision so callers don't have
        # to special case it.
        self._indexbynode[nullid] = {
            b'node': nullid,
            b'p1': nullid,
            b'p2': nullid,
            b'linkrev': nullrev,
            b'flags': 0,
        }

        self._indexbyrev[nullrev] = {
            b'node': nullid,
            b'p1': nullid,
            b'p2': nullid,
            b'linkrev': nullrev,
            b'flags': 0,
        }

        self._revbynode.setdefault(nullid, nullrev)

        for i, entry in enumerate(self._indexdata):
            # The rev of self._indexdata[i] is i by construction, so we
            # don't need to look it up via rev().
            p1rev, p2rev = self.parentrevs(i)

            # start, length, rawsize, chainbase, linkrev, p1, p2, node
            self._index.append((0, 0, 0, -1, entry[b'linkrev'], p1rev, p2rev,
                                entry[b'node']))

        self._index.append((0, 0, 0, -1, -1, -1, -1, nullid))

    def __len__(self):
        return len(self._indexdata)

    def __iter__(self):
        return iter(range(len(self)))

    def revs(self, start=0, stop=None):
        step = 1
        if stop is not None:
            if start > stop:
                step = -1

            stop += step
        else:
            stop = len(self)

        return range(start, stop, step)

    def parents(self, node):
        """Return the (p1, p2) node pair for ``node``."""
        validatenode(node)

        if node not in self._indexbynode:
            raise KeyError('unknown node')

        entry = self._indexbynode[node]

        return entry[b'p1'], entry[b'p2']

    def parentrevs(self, rev):
        p1, p2 = self.parents(self._indexbyrev[rev][b'node'])
        return self.rev(p1), self.rev(p2)

    def rev(self, node):
        """Return the integer revision number for ``node``.

        O(1) via the node -> rev map maintained by _refreshindex();
        previously this was a linear scan of the index.
        """
        validatenode(node)

        try:
            return self._revbynode[node]
        except KeyError:
            raise error.LookupError(node, self._indexpath, _('no node'))

    def node(self, rev):
        validaterev(rev)

        return self._indexbyrev[rev][b'node']

    def lookup(self, node):
        """Resolve an int rev, binary node, rev string, or hex node."""
        if isinstance(node, int):
            return self.node(node)

        if len(node) == 20:
            self.rev(node)
            return node

        try:
            rev = int(node)
            if '%d' % rev != node:
                raise ValueError

            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError

            return self.node(rev)
        except (ValueError, OverflowError):
            pass

        if len(node) == 40:
            try:
                rawnode = bin(node)
                self.rev(rawnode)
                return rawnode
            except TypeError:
                pass

        raise error.LookupError(node, self._path, _('invalid lookup input'))

    def linkrev(self, rev):
        validaterev(rev)

        return self._indexbyrev[rev][b'linkrev']

    def _flags(self, rev):
        validaterev(rev)

        return self._indexbyrev[rev][b'flags']

    def _candelta(self, baserev, rev):
        validaterev(baserev)
        validaterev(rev)

        # Deltas against revisions whose rawtext may change under flag
        # processing are not usable.
        if ((self._flags(baserev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)
            or (self._flags(rev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)):
            return False

        return True

    def _processflags(self, text, flags, operation, raw=False):
        """Apply registered flag processors; returns (text, validatehash)."""
        if flags == 0:
            return text, True

        if flags & ~revlog.REVIDX_KNOWN_FLAGS:
            raise simplestoreerror(_("incompatible revision flag '%#x'") %
                                   (flags & ~revlog.REVIDX_KNOWN_FLAGS))

        validatehash = True
        # Depending on the operation (read or write), the order might be
        # reversed due to non-commutative transforms.
        orderedflags = revlog.REVIDX_FLAGS_ORDER
        if operation == 'write':
            orderedflags = reversed(orderedflags)

        for flag in orderedflags:
            # If a flagprocessor has been registered for a known flag, apply
            # the related operation transform and update result tuple.
            if flag & flags:
                vhash = True

                if flag not in revlog._flagprocessors:
                    message = _("missing processor for flag '%#x'") % (flag)
                    raise simplestoreerror(message)

                processor = revlog._flagprocessors[flag]
                if processor is not None:
                    readtransform, writetransform, rawtransform = processor

                    if raw:
                        vhash = rawtransform(self, text)
                    elif operation == 'read':
                        text, vhash = readtransform(self, text)
                    else:  # write operation
                        text, vhash = writetransform(self, text)
                validatehash = validatehash and vhash

        return text, validatehash

    def checkhash(self, text, node, p1=None, p2=None, rev=None):
        if p1 is None and p2 is None:
            p1, p2 = self.parents(node)
        if node != revlog.hash(text, p1, p2):
            raise simplestoreerror(_("integrity check failed on %s") %
                                   self._path)

    def revision(self, node, raw=False):
        """Return the (possibly flag-processed) fulltext for ``node``."""
        validatenode(node)

        if node == nullid:
            return b''

        rev = self.rev(node)
        flags = self._flags(rev)

        path = b'/'.join([self._storepath, hex(node)])
        rawtext = self._svfs.read(path)

        text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
        if validatehash:
            self.checkhash(text, node, rev=rev)

        return text

    def read(self, node):
        """Return the fulltext with any copy metadata header stripped."""
        validatenode(node)

        revision = self.revision(node)

        if not revision.startswith(b'\1\n'):
            return revision

        start = revision.index(b'\1\n', 2)
        return revision[start + 2:]

    def renamed(self, node):
        """Return (copysource, copynode) if ``node`` is a copy, else False."""
        validatenode(node)

        if self.parents(node)[0] != nullid:
            return False

        fulltext = self.revision(node)
        m = revlog.parsemeta(fulltext)[0]

        if m and 'copy' in m:
            return m['copy'], bin(m['copyrev'])

        return False

    def cmp(self, node, text):
        """Return True if ``text`` differs from the content of ``node``."""
        validatenode(node)

        t = text

        if text.startswith(b'\1\n'):
            t = b'\1\n\1\n' + text

        p1, p2 = self.parents(node)

        if revlog.hash(t, p1, p2) == node:
            return False

        if self.iscensored(self.rev(node)):
            return text != b''

        if self.renamed(node):
            t2 = self.read(node)
            return t2 != text

        return True

    def size(self, rev):
        validaterev(rev)

        node = self._indexbyrev[rev][b'node']

        if self.renamed(node):
            return len(self.read(node))

        if self.iscensored(rev):
            return 0

        return len(self.revision(node))

    def iscensored(self, rev):
        validaterev(rev)

        return self._flags(rev) & revlog.REVIDX_ISCENSORED

    def commonancestorsheads(self, a, b):
        validatenode(a)
        validatenode(b)

        a = self.rev(a)
        b = self.rev(b)

        ancestors = ancestor.commonancestorsheads(self.parentrevs, a, b)
        return pycompat.maplist(self.node, ancestors)

    def descendants(self, revs):
        # This is a copy of revlog.descendants()
        first = min(revs)
        if first == nullrev:
            for i in self:
                yield i
            return

        seen = set(revs)
        for i in self.revs(start=first + 1):
            for x in self.parentrevs(i):
                if x != nullrev and x in seen:
                    seen.add(i)
                    yield i
                    break

    # Required by verify.
    def files(self):
        entries = self._svfs.listdir(self._storepath)

        # Strip out undo.backup.* files created as part of transaction
        # recording.
        entries = [f for f in entries if not f.startswith('undo.backup.')]

        return [b'/'.join((self._storepath, f)) for f in entries]

    def add(self, text, meta, transaction, linkrev, p1, p2):
        if meta or text.startswith(b'\1\n'):
            text = revlog.packmeta(meta, text)

        return self.addrevision(text, transaction, linkrev, p1, p2)

    def addrevision(self, text, transaction, linkrev, p1, p2, node=None,
                    flags=revlog.REVIDX_DEFAULT_FLAGS, cachedelta=None):
        validatenode(p1)
        validatenode(p2)

        if flags:
            node = node or revlog.hash(text, p1, p2)

        rawtext, validatehash = self._processflags(text, flags, 'write')

        node = node or revlog.hash(text, p1, p2)

        if node in self._indexbynode:
            return node

        if validatehash:
            self.checkhash(rawtext, node, p1=p1, p2=p2)

        return self._addrawrevision(node, rawtext, transaction, linkrev, p1, p2,
                                    flags)

    def _addrawrevision(self, node, rawtext, transaction, link, p1, p2, flags):
        transaction.addbackup(self._indexpath)

        path = b'/'.join([self._storepath, hex(node)])

        self._svfs.write(path, rawtext)

        self._indexdata.append({
            b'node': node,
            b'p1': p1,
            b'p2': p2,
            b'linkrev': link,
            b'flags': flags,
        })

        self._reflectindexupdate()

        return node

    def _reflectindexupdate(self):
        self._refreshindex()
        self._svfs.write(self._indexpath, cbor.dumps(self._indexdata))

    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
        nodes = []

        transaction.addbackup(self._indexpath)

        for node, p1, p2, linknode, deltabase, delta, flags in deltas:
            linkrev = linkmapper(linknode)
            flags = flags or revlog.REVIDX_DEFAULT_FLAGS

            nodes.append(node)

            if node in self._indexbynode:
                continue

            # Need to resolve the fulltext from the delta base.
            if deltabase == nullid:
                text = mdiff.patch(b'', delta)
            else:
                text = mdiff.patch(self.revision(deltabase), delta)

            self._addrawrevision(node, text, transaction, linkrev, p1, p2,
                                 flags)

            if addrevisioncb:
                addrevisioncb(self, node)

        return nodes

    def revdiff(self, rev1, rev2):
        validaterev(rev1)
        validaterev(rev2)

        node1 = self.node(rev1)
        node2 = self.node(rev2)

        return mdiff.textdiff(self.revision(node1, raw=True),
                              self.revision(node2, raw=True))

    def heads(self, start=None, stop=None):
        # This is copied from revlog.py.
        if start is None and stop is None:
            if not len(self):
                return [nullid]
            return [self.node(r) for r in self.headrevs()]

        if start is None:
            start = nullid
        if stop is None:
            stop = []
        stoprevs = set([self.rev(n) for n in stop])
        startrev = self.rev(start)
        reachable = {startrev}
        heads = {startrev}

        parentrevs = self.parentrevs
        for r in self.revs(start=startrev + 1):
            for p in parentrevs(r):
                if p in reachable:
                    if r not in stoprevs:
                        reachable.add(r)
                        heads.add(r)
                if p in heads and p not in stoprevs:
                    heads.remove(p)

        return [self.node(r) for r in heads]

    def children(self, node):
        validatenode(node)

        # This is a copy of revlog.children().
        c = []
        p = self.rev(node)
        for r in self.revs(start=p + 1):
            prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
            if prevs:
                for pr in prevs:
                    if pr == p:
                        c.append(self.node(r))
            elif p == nullrev:
                c.append(self.node(r))
        return c

    def getstrippoint(self, minlink):

        # This is largely a copy of revlog.getstrippoint().
        brokenrevs = set()
        strippoint = len(self)

        heads = {}
        futurelargelinkrevs = set()
        for head in self.heads():
            headlinkrev = self.linkrev(self.rev(head))
            heads[head] = headlinkrev
            if headlinkrev >= minlink:
                futurelargelinkrevs.add(headlinkrev)

        # This algorithm involves walking down the rev graph, starting at the
        # heads. Since the revs are topologically sorted according to linkrev,
        # once all head linkrevs are below the minlink, we know there are
        # no more revs that could have a linkrev greater than minlink.
        # So we can stop walking.
        while futurelargelinkrevs:
            strippoint -= 1
            linkrev = heads.pop(strippoint)

            if linkrev < minlink:
                brokenrevs.add(strippoint)
            else:
                futurelargelinkrevs.remove(linkrev)

            for p in self.parentrevs(strippoint):
                if p != nullrev:
                    plinkrev = self.linkrev(p)
                    heads[p] = plinkrev
                    if plinkrev >= minlink:
                        futurelargelinkrevs.add(plinkrev)

        return strippoint, brokenrevs

    def strip(self, minlink, transaction):
        if not len(self):
            return

        rev, _ignored = self.getstrippoint(minlink)
        if rev == len(self):
            return

        # Purge index data starting at the requested revision.
        self._indexdata[rev:] = []
        self._reflectindexupdate()
597 591
def issimplestorefile(f, kind, st):
    """Report whether a store file belongs to the simple store.

    Regular files that are neither revlogs nor transaction undo files are
    assumed to be simple store files.
    """
    # Only regular files can belong to the store.
    if kind != stat.S_IFREG:
        return False

    # Revlogs are owned by the revlog-based store.
    if store.isrevlog(f, kind, st):
        return False

    # Ignore transaction undo files.
    if f.startswith('undo.'):
        return False

    # Otherwise assume it belongs to the simple store.
    return True
611 605
class simplestore(store.encodedstore):
    def datafiles(self):
        """Yield store data files, including non-revlog simple store files."""
        for entry in super(simplestore, self).datafiles():
            yield entry

        # Supplement with non-revlog files.
        extrafiles = self._walk('data', True, filefilter=issimplestorefile)

        for unencoded, encoded, size in extrafiles:
            try:
                unencoded = store.decodefilename(unencoded)
            except KeyError:
                # Undecodable names are reported with no unencoded form.
                unencoded = None

            yield unencoded, encoded, size
627 621
def reposetup(ui, repo):
    """Swap the repo's file storage for the simple store implementation."""
    if not repo.local():
        return

    if isinstance(repo, bundlerepo.bundlerepository):
        raise error.Abort(_('cannot use simple store with bundlerepo'))

    class simplestorerepo(repo.__class__):
        def file(self, f):
            return filestorage(self.svfs, f)

    repo.__class__ = simplestorerepo
640 634
def featuresetup(ui, supported):
    """Advertise the simple store requirement as supported."""
    supported.add(REQUIREMENT)
643 637
def newreporequirements(orig, ui):
    """Modifies default requirements for new repos to use the simple store."""
    requirements = orig(ui)

    # These requirements are only used to affect creation of the store
    # object. We have our own store. So we can remove them.
    # TODO do this once we feel like taking the test hit.
    #if 'fncache' in requirements:
    #    requirements.remove('fncache')
    #if 'dotencode' in requirements:
    #    requirements.remove('dotencode')

    requirements.add(REQUIREMENT)

    return requirements
659 653
def makestore(orig, requirements, path, vfstype):
    """Return a simplestore when the repo requires it, else defer to orig."""
    if REQUIREMENT not in requirements:
        return orig(requirements, path, vfstype)

    return simplestore(path, vfstype)
665 659
def verifierinit(orig, self, *args, **kwargs):
    """Wrap verify.verifier.__init__ to silence orphan-file warnings."""
    orig(self, *args, **kwargs)

    # We don't care that files in the store don't align with what is
    # advertised. So suppress these warnings.
    self.warnorphanstorefiles = False
672 666
def extsetup(ui):
    """Register feature setup and wrap store/verifier entry points."""
    localrepo.featuresetupfuncs.add(featuresetup)

    extensions.wrapfunction(localrepo, 'newreporequirements',
                            newreporequirements)
    extensions.wrapfunction(store, 'store', makestore)
    extensions.wrapfunction(verify.verifier, '__init__', verifierinit)
General Comments 0
You need to be logged in to leave comments. Login now