filelog: remove revdiff() (API)...
Gregory Szorc
r40033:2f80eaf3 default
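
The removed method only proxied to the underlying revlog and, per the
interface docstring deleted below, returned the result of ``bdiff.bdiff``
over raw revision data. A minimal sketch of an equivalent helper for any
remaining caller, assuming Mercurial's ``mdiff.textdiff`` wrapper and an
object conforming to the surviving ``ifiledata`` interface (the helper
name is illustrative):

    from mercurial import mdiff

    def rawdelta(store, rev1, rev2):
        # bdiff-format delta computed over raw revision data, matching
        # the contract of the removed revdiff() docstring.
        t1 = store.revision(store.node(rev1), raw=True)
        t2 = store.revision(store.node(rev2), raw=True)
        return mdiff.textdiff(t1, t2)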
mercurial/filelog.py
@@ -1,252 +1,249 @@
1 1 # filelog.py - file history class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from . import (
11 11 error,
12 12 repository,
13 13 revlog,
14 14 )
15 15 from .utils import (
16 16 interfaceutil,
17 17 storageutil,
18 18 )
19 19
20 20 @interfaceutil.implementer(repository.ifilestorage)
21 21 class filelog(object):
22 22 def __init__(self, opener, path):
23 23 self._revlog = revlog.revlog(opener,
24 24 '/'.join(('data', path + '.i')),
25 25 censorable=True)
26 26 # Full name of the user visible file, relative to the repository root.
27 27 # Used by LFS.
28 28 self._revlog.filename = path
29 29
30 30 def __len__(self):
31 31 return len(self._revlog)
32 32
33 33 def __iter__(self):
34 34 return self._revlog.__iter__()
35 35
36 36 def revs(self, start=0, stop=None):
37 37 return self._revlog.revs(start=start, stop=stop)
38 38
39 39 def parents(self, node):
40 40 return self._revlog.parents(node)
41 41
42 42 def parentrevs(self, rev):
43 43 return self._revlog.parentrevs(rev)
44 44
45 45 def rev(self, node):
46 46 return self._revlog.rev(node)
47 47
48 48 def node(self, rev):
49 49 return self._revlog.node(rev)
50 50
51 51 def lookup(self, node):
52 52 return self._revlog.lookup(node)
53 53
54 54 def linkrev(self, rev):
55 55 return self._revlog.linkrev(rev)
56 56
57 57 def commonancestorsheads(self, node1, node2):
58 58 return self._revlog.commonancestorsheads(node1, node2)
59 59
60 60 # Used by dagop.blockdescendants().
61 61 def descendants(self, revs):
62 62 return self._revlog.descendants(revs)
63 63
64 64 def heads(self, start=None, stop=None):
65 65 return self._revlog.heads(start, stop)
66 66
67 67 # Used by hgweb, children extension.
68 68 def children(self, node):
69 69 return self._revlog.children(node)
70 70
71 71 def iscensored(self, rev):
72 72 return self._revlog.iscensored(rev)
73 73
74 74 # Might be unused.
75 75 def checkhash(self, text, node, p1=None, p2=None, rev=None):
76 76 return self._revlog.checkhash(text, node, p1=p1, p2=p2, rev=rev)
77 77
78 78 def revision(self, node, _df=None, raw=False):
79 79 return self._revlog.revision(node, _df=_df, raw=raw)
80 80
81 def revdiff(self, rev1, rev2):
82 return self._revlog.revdiff(rev1, rev2)
83
84 81 def emitrevisions(self, nodes, nodesorder=None,
85 82 revisiondata=False, assumehaveparentrevisions=False,
86 83 deltaprevious=False):
87 84 return self._revlog.emitrevisions(
88 85 nodes, nodesorder=nodesorder, revisiondata=revisiondata,
89 86 assumehaveparentrevisions=assumehaveparentrevisions,
90 87 deltaprevious=deltaprevious)
91 88
92 89 def addrevision(self, revisiondata, transaction, linkrev, p1, p2,
93 90 node=None, flags=revlog.REVIDX_DEFAULT_FLAGS,
94 91 cachedelta=None):
95 92 return self._revlog.addrevision(revisiondata, transaction, linkrev,
96 93 p1, p2, node=node, flags=flags,
97 94 cachedelta=cachedelta)
98 95
99 96 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
100 97 return self._revlog.addgroup(deltas, linkmapper, transaction,
101 98 addrevisioncb=addrevisioncb)
102 99
103 100 def getstrippoint(self, minlink):
104 101 return self._revlog.getstrippoint(minlink)
105 102
106 103 def strip(self, minlink, transaction):
107 104 return self._revlog.strip(minlink, transaction)
108 105
109 106 def censorrevision(self, tr, node, tombstone=b''):
110 107 return self._revlog.censorrevision(node, tombstone=tombstone)
111 108
112 109 def files(self):
113 110 return self._revlog.files()
114 111
115 112 def read(self, node):
116 113 return storageutil.filtermetadata(self.revision(node))
117 114
118 115 def add(self, text, meta, transaction, link, p1=None, p2=None):
119 116 if meta or text.startswith('\1\n'):
120 117 text = storageutil.packmeta(meta, text)
121 118 return self.addrevision(text, transaction, link, p1, p2)
122 119
123 120 def renamed(self, node):
124 121 if self.parents(node)[0] != revlog.nullid:
125 122 return False
126 123 t = self.revision(node)
127 124 m = storageutil.parsemeta(t)[0]
128 125 # copy and copyrev occur in pairs. In rare cases due to bugs,
129 126 # one can occur without the other.
130 127 if m and "copy" in m and "copyrev" in m:
131 128 return (m["copy"], revlog.bin(m["copyrev"]))
132 129 return False
133 130
134 131 def size(self, rev):
135 132 """return the size of a given revision"""
136 133
137 134 # for revisions with renames, we have to go the slow way
138 135 node = self.node(rev)
139 136 if self.renamed(node):
140 137 return len(self.read(node))
141 138 if self.iscensored(rev):
142 139 return 0
143 140
144 141 # XXX if self.read(node).startswith("\1\n"), this returns (size+4)
145 142 return self._revlog.size(rev)
146 143
147 144 def cmp(self, node, text):
148 145 """compare text with a given file revision
149 146
150 147 returns True if text is different than what is stored.
151 148 """
152 149
153 150 t = text
154 151 if text.startswith('\1\n'):
155 152 t = '\1\n\1\n' + text
156 153
157 154 samehashes = not self._revlog.cmp(node, t)
158 155 if samehashes:
159 156 return False
160 157
161 158 # censored files compare against the empty file
162 159 if self.iscensored(self.rev(node)):
163 160 return text != ''
164 161
165 162 # renaming a file produces a different hash, even if the data
166 163 # remains unchanged. Check if it's the case (slow):
167 164 if self.renamed(node):
168 165 t2 = self.read(node)
169 166 return t2 != text
170 167
171 168 return True
172 169
173 170 def verifyintegrity(self, state):
174 171 return self._revlog.verifyintegrity(state)
175 172
176 173 def storageinfo(self, exclusivefiles=False, sharedfiles=False,
177 174 revisionscount=False, trackedsize=False,
178 175 storedsize=False):
179 176 return self._revlog.storageinfo(
180 177 exclusivefiles=exclusivefiles, sharedfiles=sharedfiles,
181 178 revisionscount=revisionscount, trackedsize=trackedsize,
182 179 storedsize=storedsize)
183 180
184 181 # TODO these aren't part of the interface and aren't internal methods.
185 182 # Callers should be fixed to not use them.
186 183
187 184 # Used by bundlefilelog, unionfilelog.
188 185 @property
189 186 def indexfile(self):
190 187 return self._revlog.indexfile
191 188
192 189 @indexfile.setter
193 190 def indexfile(self, value):
194 191 self._revlog.indexfile = value
195 192
196 193 # Used by repo upgrade.
197 194 def clone(self, tr, destrevlog, **kwargs):
198 195 if not isinstance(destrevlog, filelog):
199 196 raise error.ProgrammingError('expected filelog to clone()')
200 197
201 198 return self._revlog.clone(tr, destrevlog._revlog, **kwargs)
202 199
203 200 class narrowfilelog(filelog):
204 201 """Filelog variation to be used with narrow stores."""
205 202
206 203 def __init__(self, opener, path, narrowmatch):
207 204 super(narrowfilelog, self).__init__(opener, path)
208 205 self._narrowmatch = narrowmatch
209 206
210 207 def renamed(self, node):
211 208 res = super(narrowfilelog, self).renamed(node)
212 209
213 210 # Renames that come from outside the narrowspec are problematic
214 211 # because we may lack the base text for the rename. This can result
215 212 # in code attempting to walk the ancestry or compute a diff
216 213 # encountering a missing revision. We address this by silently
217 214 # removing rename metadata if the source file is outside the
218 215 # narrow spec.
219 216 #
220 217 # A better solution would be to see if the base revision is available,
221 218 # rather than assuming it isn't.
222 219 #
223 220 # An even better solution would be to teach all consumers of rename
224 221 # metadata that the base revision may not be available.
225 222 #
226 223 # TODO consider better ways of doing this.
227 224 if res and not self._narrowmatch(res[0]):
228 225 return None
229 226
230 227 return res
231 228
232 229 def size(self, rev):
233 230 # Because we have a custom renamed() that may lie, we need to call
234 231 # the base renamed() to report accurate results.
235 232 node = self.node(rev)
236 233 if super(narrowfilelog, self).renamed(node):
237 234 return len(self.read(node))
238 235 else:
239 236 return super(narrowfilelog, self).size(rev)
240 237
241 238 def cmp(self, node, text):
242 239 different = super(narrowfilelog, self).cmp(node, text)
243 240
244 241 # Because renamed() may lie, we may get false positives for
245 242 # different content. Check for this by comparing against the original
246 243 # renamed() implementation.
247 244 if different:
248 245 if super(narrowfilelog, self).renamed(node):
249 246 t2 = self.read(node)
250 247 return t2 != text
251 248
252 249 return different
mercurial/repository.py
@@ -1,1677 +1,1668 @@
1 1 # repository.py - Interfaces and base classes for repositories and peers.
2 2 #
3 3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from .i18n import _
11 11 from . import (
12 12 error,
13 13 )
14 14 from .utils import (
15 15 interfaceutil,
16 16 )
17 17
18 18 # When narrowing is finalized and no longer subject to format changes,
19 19 # we should move this to just "narrow" or similar.
20 20 NARROW_REQUIREMENT = 'narrowhg-experimental'
21 21
22 22 # Local repository feature string.
23 23
24 24 # Revlogs are being used for file storage.
25 25 REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
26 26 # The storage part of the repository is shared from an external source.
27 27 REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
28 28 # LFS supported for backing file storage.
29 29 REPO_FEATURE_LFS = b'lfs'
30 30
31 31 class ipeerconnection(interfaceutil.Interface):
32 32 """Represents a "connection" to a repository.
33 33
34 34 This is the base interface for representing a connection to a repository.
35 35 It holds basic properties and methods applicable to all peer types.
36 36
37 37 This is not a complete interface definition and should not be used
38 38 outside of this module.
39 39 """
40 40 ui = interfaceutil.Attribute("""ui.ui instance""")
41 41
42 42 def url():
43 43 """Returns a URL string representing this peer.
44 44
45 45 Currently, implementations expose the raw URL used to construct the
46 46 instance. It may contain credentials as part of the URL. The
47 47 expectations of the value aren't well-defined and this could lead to
48 48 data leakage.
49 49
50 50 TODO audit/clean consumers and more clearly define the contents of this
51 51 value.
52 52 """
53 53
54 54 def local():
55 55 """Returns a local repository instance.
56 56
57 57 If the peer represents a local repository, returns an object that
58 58 can be used to interface with it. Otherwise returns ``None``.
59 59 """
60 60
61 61 def peer():
62 62 """Returns an object conforming to this interface.
63 63
64 64 Most implementations will ``return self``.
65 65 """
66 66
67 67 def canpush():
68 68 """Returns a boolean indicating if this peer can be pushed to."""
69 69
70 70 def close():
71 71 """Close the connection to this peer.
72 72
73 73 This is called when the peer will no longer be used. Resources
74 74 associated with the peer should be cleaned up.
75 75 """
76 76
77 77 class ipeercapabilities(interfaceutil.Interface):
78 78 """Peer sub-interface related to capabilities."""
79 79
80 80 def capable(name):
81 81 """Determine support for a named capability.
82 82
83 83 Returns ``False`` if capability not supported.
84 84
85 85 Returns ``True`` if boolean capability is supported. Returns a string
86 86 if capability support is non-boolean.
87 87
88 88 Capability strings may or may not map to wire protocol capabilities.
89 89 """
90 90
91 91 def requirecap(name, purpose):
92 92 """Require a capability to be present.
93 93
94 94 Raises a ``CapabilityError`` if the capability isn't present.
95 95 """
96 96
97 97 class ipeercommands(interfaceutil.Interface):
98 98 """Client-side interface for communicating over the wire protocol.
99 99
100 100 This interface is used as a gateway to the Mercurial wire protocol.
101 101 methods commonly call wire protocol commands of the same name.
102 102 """
103 103
104 104 def branchmap():
105 105 """Obtain heads in named branches.
106 106
107 107 Returns a dict mapping branch name to an iterable of nodes that are
108 108 heads on that branch.
109 109 """
110 110
111 111 def capabilities():
112 112 """Obtain capabilities of the peer.
113 113
114 114 Returns a set of string capabilities.
115 115 """
116 116
117 117 def clonebundles():
118 118 """Obtains the clone bundles manifest for the repo.
119 119
120 120 Returns the manifest as unparsed bytes.
121 121 """
122 122
123 123 def debugwireargs(one, two, three=None, four=None, five=None):
124 124 """Used to facilitate debugging of arguments passed over the wire."""
125 125
126 126 def getbundle(source, **kwargs):
127 127 """Obtain remote repository data as a bundle.
128 128
129 129 This command is how the bulk of repository data is transferred from
130 130 the peer to the local repository
131 131
132 132 Returns a generator of bundle data.
133 133 """
134 134
135 135 def heads():
136 136 """Determine all known head revisions in the peer.
137 137
138 138 Returns an iterable of binary nodes.
139 139 """
140 140
141 141 def known(nodes):
142 142 """Determine whether multiple nodes are known.
143 143
144 144 Accepts an iterable of nodes whose presence to check for.
145 145
146 146 Returns an iterable of booleans indicating whether the corresponding node
147 147 at that index is known to the peer.
148 148 """
149 149
150 150 def listkeys(namespace):
151 151 """Obtain all keys in a pushkey namespace.
152 152
153 153 Returns an iterable of key names.
154 154 """
155 155
156 156 def lookup(key):
157 157 """Resolve a value to a known revision.
158 158
159 159 Returns a binary node of the resolved revision on success.
160 160 """
161 161
162 162 def pushkey(namespace, key, old, new):
163 163 """Set a value using the ``pushkey`` protocol.
164 164
165 165 Arguments correspond to the pushkey namespace and key to operate on and
166 166 the old and new values for that key.
167 167
168 168 Returns a string with the peer result. The value inside varies by the
169 169 namespace.
170 170 """
171 171
172 172 def stream_out():
173 173 """Obtain streaming clone data.
174 174
175 175 Successful result should be a generator of data chunks.
176 176 """
177 177
178 178 def unbundle(bundle, heads, url):
179 179 """Transfer repository data to the peer.
180 180
181 181 This is how the bulk of data during a push is transferred.
182 182
183 183 Returns the integer number of heads added to the peer.
184 184 """
185 185
186 186 class ipeerlegacycommands(interfaceutil.Interface):
187 187 """Interface for implementing support for legacy wire protocol commands.
188 188
189 189 Wire protocol commands transition to legacy status when they are no longer
190 190 used by modern clients. To facilitate identifying which commands are
191 191 legacy, the interfaces are split.
192 192 """
193 193
194 194 def between(pairs):
195 195 """Obtain nodes between pairs of nodes.
196 196
197 197 ``pairs`` is an iterable of node pairs.
198 198
199 199 Returns an iterable of iterables of nodes corresponding to each
200 200 requested pair.
201 201 """
202 202
203 203 def branches(nodes):
204 204 """Obtain ancestor changesets of specific nodes back to a branch point.
205 205
206 206 For each requested node, the peer finds the first ancestor node that is
207 207 a DAG root or is a merge.
208 208
209 209 Returns an iterable of iterables with the resolved values for each node.
210 210 """
211 211
212 212 def changegroup(nodes, source):
213 213 """Obtain a changegroup with data for descendants of specified nodes."""
214 214
215 215 def changegroupsubset(bases, heads, source):
216 216 pass
217 217
218 218 class ipeercommandexecutor(interfaceutil.Interface):
219 219 """Represents a mechanism to execute remote commands.
220 220
221 221 This is the primary interface for requesting that wire protocol commands
222 222 be executed. Instances of this interface are active in a context manager
223 223 and have a well-defined lifetime. When the context manager exits, all
224 224 outstanding requests are waited on.
225 225 """
226 226
227 227 def callcommand(name, args):
228 228 """Request that a named command be executed.
229 229
230 230 Receives the command name and a dictionary of command arguments.
231 231
232 232 Returns a ``concurrent.futures.Future`` that will resolve to the
233 233 result of that command request. That exact value is left up to
234 234 the implementation and possibly varies by command.
235 235
236 236 Not all commands can coexist with other commands in an executor
237 237 instance: it depends on the underlying wire protocol transport being
238 238 used and the command itself.
239 239
240 240 Implementations MAY call ``sendcommands()`` automatically if the
241 241 requested command can not coexist with other commands in this executor.
242 242
243 243 Implementations MAY call ``sendcommands()`` automatically when the
244 244 future's ``result()`` is called. So, consumers using multiple
245 245 commands with an executor MUST ensure that ``result()`` is not called
246 246 until all command requests have been issued.
247 247 """
248 248
249 249 def sendcommands():
250 250 """Trigger submission of queued command requests.
251 251
252 252 Not all transports submit commands as soon as they are requested to
253 253 run. When called, this method forces queued command requests to be
254 254 issued. It will no-op if all commands have already been sent.
255 255
256 256 When called, no more new commands may be issued with this executor.
257 257 """
258 258
259 259 def close():
260 260 """Signal that this command request is finished.
261 261
262 262 When called, no more new commands may be issued. All outstanding
263 263 commands that have previously been issued are waited on before
264 264 returning. This not only includes waiting for the futures to resolve,
265 265 but also waiting for all response data to arrive. In other words,
266 266 calling this waits for all on-wire state for issued command requests
267 267 to finish.
268 268
269 269 When used as a context manager, this method is called when exiting the
270 270 context manager.
271 271
272 272 This method may call ``sendcommands()`` if there are buffered commands.
273 273 """
274 274
275 275 class ipeerrequests(interfaceutil.Interface):
276 276 """Interface for executing commands on a peer."""
277 277
278 278 def commandexecutor():
279 279 """A context manager that resolves to an ipeercommandexecutor.
280 280
281 281 The object this resolves to can be used to issue command requests
282 282 to the peer.
283 283
284 284 Callers should call its ``callcommand`` method to issue command
285 285 requests.
286 286
287 287 A new executor should be obtained for each distinct set of commands
288 288 (possibly just a single command) that the consumer wants to execute
289 289 as part of a single operation or round trip. This is because some
290 290 peers are half-duplex and/or don't support persistent connections.
291 291 e.g. in the case of HTTP peers, commands sent to an executor represent
292 292 a single HTTP request. While some peers may support multiple command
293 293 sends over the wire per executor, consumers need to code to the least
294 294 capable peer. So it should be assumed that command executors buffer
295 295 called commands until they are told to send them and that each
296 296 command executor could result in a new connection or wire-level request
297 297 being issued.
298 298 """
299 299
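# Editorial aside, not part of this change: a minimal sketch of the usage
# pattern described by the executor docstrings above. ``peer`` is assumed
# to conform to ipeerbase; ``heads`` is the wire protocol command of the
# same name from ipeercommands; the function name is illustrative.
def fetchremoteheads(peer):
    # Issue every command for this round trip through a single executor.
    with peer.commandexecutor() as e:
        f = e.callcommand(b'heads', {})
    # Exiting the context manager waits on all outstanding requests, so
    # the future is resolved by the time result() is called.
    return f.result()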
300 300 class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
301 301 """Unified interface for peer repositories.
302 302
303 303 All peer instances must conform to this interface.
304 304 """
305 305
306 306 @interfaceutil.implementer(ipeerbase)
307 307 class peer(object):
308 308 """Base class for peer repositories."""
309 309
310 310 def capable(self, name):
311 311 caps = self.capabilities()
312 312 if name in caps:
313 313 return True
314 314
315 315 name = '%s=' % name
316 316 for cap in caps:
317 317 if cap.startswith(name):
318 318 return cap[len(name):]
319 319
320 320 return False
321 321
322 322 def requirecap(self, name, purpose):
323 323 if self.capable(name):
324 324 return
325 325
326 326 raise error.CapabilityError(
327 327 _('cannot %s; remote repository does not support the %r '
328 328 'capability') % (purpose, name))
329 329
330 330 class iverifyproblem(interfaceutil.Interface):
331 331 """Represents a problem with the integrity of the repository.
332 332
333 333 Instances of this interface are emitted to describe an integrity issue
334 334 with a repository (e.g. corrupt storage, missing data, etc).
335 335
336 336 Instances are essentially messages associated with severity.
337 337 """
338 338 warning = interfaceutil.Attribute(
339 339 """Message indicating a non-fatal problem.""")
340 340
341 341 error = interfaceutil.Attribute(
342 342 """Message indicating a fatal problem.""")
343 343
344 344 node = interfaceutil.Attribute(
345 345 """Revision encountering the problem.
346 346
347 347 ``None`` means the problem doesn't apply to a single revision.
348 348 """)
349 349
350 350 class irevisiondelta(interfaceutil.Interface):
351 351 """Represents a delta between one revision and another.
352 352
353 353 Instances convey enough information to allow a revision to be exchanged
354 354 with another repository.
355 355
356 356 Instances represent the fulltext revision data or a delta against
357 357 another revision. Therefore the ``revision`` and ``delta`` attributes
358 358 are mutually exclusive.
359 359
360 360 Typically used for changegroup generation.
361 361 """
362 362
363 363 node = interfaceutil.Attribute(
364 364 """20 byte node of this revision.""")
365 365
366 366 p1node = interfaceutil.Attribute(
367 367 """20 byte node of 1st parent of this revision.""")
368 368
369 369 p2node = interfaceutil.Attribute(
370 370 """20 byte node of 2nd parent of this revision.""")
371 371
372 372 linknode = interfaceutil.Attribute(
373 373 """20 byte node of the changelog revision this node is linked to.""")
374 374
375 375 flags = interfaceutil.Attribute(
376 376 """2 bytes of integer flags that apply to this revision.""")
377 377
378 378 basenode = interfaceutil.Attribute(
379 379 """20 byte node of the revision this data is a delta against.
380 380
381 381 ``nullid`` indicates that the revision is a full revision and not
382 382 a delta.
383 383 """)
384 384
385 385 baserevisionsize = interfaceutil.Attribute(
386 386 """Size of base revision this delta is against.
387 387
388 388 May be ``None`` if ``basenode`` is ``nullid``.
389 389 """)
390 390
391 391 revision = interfaceutil.Attribute(
392 392 """Raw fulltext of revision data for this node.""")
393 393
394 394 delta = interfaceutil.Attribute(
395 395 """Delta between ``basenode`` and ``node``.
396 396
397 397 Stored in the bdiff delta format.
398 398 """)
399 399
400 400 class ifilerevisionssequence(interfaceutil.Interface):
401 401 """Contains index data for all revisions of a file.
402 402
403 403 Types implementing this behave like lists of tuples. The index
404 404 in the list corresponds to the revision number. The values contain
405 405 index metadata.
406 406
407 407 The *null* revision (revision number -1) is always the last item
408 408 in the index.
409 409 """
410 410
411 411 def __len__():
412 412 """The total number of revisions."""
413 413
414 414 def __getitem__(rev):
415 415 """Returns the object having a specific revision number.
416 416
417 417 Returns an 8-tuple with the following fields:
418 418
419 419 offset+flags
420 420 Contains the offset and flags for the revision. 64-bit unsigned
421 421 integer where first 6 bytes are the offset and the next 2 bytes
422 422 are flags. The offset can be 0 if it is not used by the store.
423 423 compressed size
424 424 Size of the revision data in the store. It can be 0 if it isn't
425 425 needed by the store.
426 426 uncompressed size
427 427 Fulltext size. It can be 0 if it isn't needed by the store.
428 428 base revision
429 429 Revision number of revision the delta for storage is encoded
430 430 against. -1 indicates not encoded against a base revision.
431 431 link revision
432 432 Revision number of changelog revision this entry is related to.
433 433 p1 revision
434 434 Revision number of 1st parent. -1 if no 1st parent.
435 435 p2 revision
436 436 Revision number of 2nd parent. -1 if no 2nd parent.
437 437 node
438 438 Binary node value for this revision number.
439 439
440 440 Negative values should index off the end of the sequence. ``-1``
441 441 should return the null revision. ``-2`` should return the most
442 442 recent revision.
443 443 """
444 444
445 445 def __contains__(rev):
446 446 """Whether a revision number exists."""
447 447
448 448 def insert(self, i, entry):
449 449 """Add an item to the index at specific revision."""
450 450
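# Editorial aside, not part of this change: unpacking an index entry per
# the 8-tuple layout documented in ``__getitem__`` above. The helper name
# is illustrative.
def parseindexentry(entry):
    offsetflags, compsize, fullsize, baserev, linkrev, p1rev, p2rev, node = entry
    # The first 6 bytes of the 64-bit value hold the offset and the last
    # 2 bytes hold the flags.
    offset = offsetflags >> 16
    flags = offsetflags & 0xFFFF
    return offset, flags, compsize, fullsize, baserev, linkrev, p1rev, p2rev, node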
451 451 class ifileindex(interfaceutil.Interface):
452 452 """Storage interface for index data of a single file.
453 453
454 454 File storage data is divided into index metadata and data storage.
455 455 This interface defines the index portion of the interface.
456 456
457 457 The index logically consists of:
458 458
459 459 * A mapping between revision numbers and nodes.
460 460 * DAG data (storing and querying the relationship between nodes).
461 461 * Metadata to facilitate storage.
462 462 """
463 463 def __len__():
464 464 """Obtain the number of revisions stored for this file."""
465 465
466 466 def __iter__():
467 467 """Iterate over revision numbers for this file."""
468 468
469 469 def revs(start=0, stop=None):
470 470 """Iterate over revision numbers for this file, with control."""
471 471
472 472 def parents(node):
473 473 """Returns a 2-tuple of parent nodes for a revision.
474 474
475 475 Values will be ``nullid`` if the parent is empty.
476 476 """
477 477
478 478 def parentrevs(rev):
479 479 """Like parents() but operates on revision numbers."""
480 480
481 481 def rev(node):
482 482 """Obtain the revision number given a node.
483 483
484 484 Raises ``error.LookupError`` if the node is not known.
485 485 """
486 486
487 487 def node(rev):
488 488 """Obtain the node value given a revision number.
489 489
490 490 Raises ``IndexError`` if the node is not known.
491 491 """
492 492
493 493 def lookup(node):
494 494 """Attempt to resolve a value to a node.
495 495
496 496 Value can be a binary node, hex node, revision number, or a string
497 497 that can be converted to an integer.
498 498
499 499 Raises ``error.LookupError`` if a node could not be resolved.
500 500 """
501 501
502 502 def linkrev(rev):
503 503 """Obtain the changeset revision number a revision is linked to."""
504 504
505 505 def iscensored(rev):
506 506 """Return whether a revision's content has been censored."""
507 507
508 508 def commonancestorsheads(node1, node2):
509 509 """Obtain an iterable of nodes containing heads of common ancestors.
510 510
511 511 See ``ancestor.commonancestorsheads()``.
512 512 """
513 513
514 514 def descendants(revs):
515 515 """Obtain descendant revision numbers for a set of revision numbers.
516 516
517 517 If ``nullrev`` is in the set, this is equivalent to ``revs()``.
518 518 """
519 519
520 520 def heads(start=None, stop=None):
521 521 """Obtain a list of nodes that are DAG heads, with control.
522 522
523 523 The set of revisions examined can be limited by specifying
524 524 ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
525 525 iterable of nodes. DAG traversal starts at earlier revision
526 526 ``start`` and iterates forward until any node in ``stop`` is
527 527 encountered.
528 528 """
529 529
530 530 def children(node):
531 531 """Obtain nodes that are children of a node.
532 532
533 533 Returns a list of nodes.
534 534 """
535 535
536 536 class ifiledata(interfaceutil.Interface):
537 537 """Storage interface for data storage of a specific file.
538 538
539 539 This complements ``ifileindex`` and provides an interface for accessing
540 540 data for a tracked file.
541 541 """
542 542 def size(rev):
543 543 """Obtain the fulltext size of file data.
544 544
545 545 Any metadata is excluded from size measurements.
546 546 """
547 547
548 548 def checkhash(fulltext, node, p1=None, p2=None, rev=None):
549 549 """Validate the stored hash of a given fulltext and node.
550 550
551 551 Raises ``error.StorageError`` if hash validation fails.
552 552 """
553 553
554 554 def revision(node, raw=False):
555 555 """"Obtain fulltext data for a node.
556 556
557 557 By default, any storage transformations are applied before the data
558 558 is returned. If ``raw`` is True, non-raw storage transformations
559 559 are not applied.
560 560
561 561 The fulltext data may contain a header containing metadata. Most
562 562 consumers should use ``read()`` to obtain the actual file data.
563 563 """
564 564
565 565 def read(node):
566 566 """Resolve file fulltext data.
567 567
568 568 This is similar to ``revision()`` except any metadata in the data
569 569 headers is stripped.
570 570 """
571 571
572 572 def renamed(node):
573 573 """Obtain copy metadata for a node.
574 574
575 575 Returns ``False`` if no copy metadata is stored or a 2-tuple of
576 576 (path, node) from which this revision was copied.
577 577 """
578 578
579 579 def cmp(node, fulltext):
580 580 """Compare fulltext to another revision.
581 581
582 582 Returns True if the fulltext is different from what is stored.
583 583
584 584 This takes copy metadata into account.
585 585
586 586 TODO better document the copy metadata and censoring logic.
587 587 """
588 588
589 def revdiff(rev1, rev2):
590 """Obtain a delta between two revision numbers.
591
592 Operates on raw data in the store (``revision(node, raw=True)``).
593
594 The returned data is the result of ``bdiff.bdiff`` on the raw
595 revision data.
596 """
597
598 589 def emitrevisions(nodes,
599 590 nodesorder=None,
600 591 revisiondata=False,
601 592 assumehaveparentrevisions=False,
602 593 deltaprevious=False):
603 594 """Produce ``irevisiondelta`` for revisions.
604 595
605 596 Given an iterable of nodes, emits objects conforming to the
606 597 ``irevisiondelta`` interface that describe revisions in storage.
607 598
608 599 This method is a generator.
609 600
610 601 The input nodes may be unordered. Implementations must ensure that a
611 602 node's parents are emitted before the node itself. Transitively, this
612 603 means that a node may only be emitted once all its ancestors in
613 604 ``nodes`` have also been emitted.
614 605
615 606 By default, emits "index" data (the ``node``, ``p1node``, and
616 607 ``p2node`` attributes). If ``revisiondata`` is set, revision data
617 608 will also be present on the emitted objects.
618 609
619 610 With default argument values, implementations can choose to emit
620 611 either fulltext revision data or a delta. When emitting deltas,
621 612 implementations must consider whether the delta's base revision
622 613 fulltext is available to the receiver.
623 614
624 615 The base revision fulltext is guaranteed to be available if any of
625 616 the following are met:
626 617
627 618 * Its fulltext revision was emitted by this method call.
628 619 * A delta for that revision was emitted by this method call.
629 620 * ``assumehaveparentrevisions`` is True and the base revision is a
630 621 parent of the node.
631 622
632 623 ``nodesorder`` can be used to control the order that revisions are
633 624 emitted. By default, revisions can be reordered as long as they are
634 625 in DAG topological order (see above). If the value is ``nodes``,
635 626 the iteration order from ``nodes`` should be used. If the value is
636 627 ``storage``, then the native order from the backing storage layer
637 628 is used. (Not all storage layers will have strong ordering and behavior
638 629 of this mode is storage-dependent.) ``nodes`` ordering can force
639 630 revisions to be emitted before their ancestors, so consumers should
640 631 use it with care.
641 632
642 633 The ``linknode`` attribute on the returned ``irevisiondelta`` may not
643 634 be set and it is the caller's responsibility to resolve it, if needed.
644 635
645 636 If ``deltaprevious`` is True and revision data is requested, all
646 637 revision data should be emitted as deltas against the revision
647 638 emitted just prior. The initial revision should be a delta against
648 639 its 1st parent.
649 640 """
650 641
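# Editorial aside, not part of this change: a sketch of a consumer of
# emitrevisions() as documented above. ``store`` is assumed to conform to
# ifiledata; attribute names follow the irevisiondelta interface.
def collectrevisiondata(store, nodes):
    out = {}
    for rev in store.emitrevisions(nodes, revisiondata=True):
        if rev.revision is not None:
            # Fulltext revision data was emitted; revision and delta are
            # mutually exclusive.
            out[rev.node] = (None, rev.revision)
        else:
            # A bdiff-format delta against rev.basenode was emitted; the
            # base fulltext is guaranteed available under the rules above.
            out[rev.node] = (rev.basenode, rev.delta)
    return out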
651 642 class ifilemutation(interfaceutil.Interface):
652 643 """Storage interface for mutation events of a tracked file."""
653 644
654 645 def add(filedata, meta, transaction, linkrev, p1, p2):
655 646 """Add a new revision to the store.
656 647
657 648 Takes file data, dictionary of metadata, a transaction, linkrev,
658 649 and parent nodes.
659 650
660 651 Returns the node that was added.
661 652
662 653 May no-op if a revision matching the supplied data is already stored.
663 654 """
664 655
665 656 def addrevision(revisiondata, transaction, linkrev, p1, p2, node=None,
666 657 flags=0, cachedelta=None):
667 658 """Add a new revision to the store.
668 659
669 660 This is similar to ``add()`` except it operates at a lower level.
670 661
671 662 The data passed in already contains a metadata header, if any.
672 663
673 664 ``node`` and ``flags`` can be used to define the expected node and
674 665 the flags to use with storage.
675 666
676 667 ``add()`` is usually called when adding files from e.g. the working
677 668 directory. ``addrevision()`` is often called by ``add()`` and for
678 669 scenarios where revision data has already been computed, such as when
679 670 applying raw data from a peer repo.
680 671 """
681 672
682 673 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
683 674 """Process a series of deltas for storage.
684 675
685 676 ``deltas`` is an iterable of 7-tuples of
686 677 (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
687 678 to add.
688 679
689 680 The ``delta`` field contains ``mpatch`` data to apply to a base
690 681 revision, identified by ``deltabase``. The base node can be
691 682 ``nullid``, in which case the header from the delta can be ignored
692 683 and the delta used as the fulltext.
693 684
694 685 ``addrevisioncb`` should be called for each node as it is committed.
695 686
696 687 Returns a list of nodes that were processed. A node will be in the list
697 688 even if it existed in the store previously.
698 689 """
699 690
700 691 def censorrevision(tr, node, tombstone=b''):
701 692 """Remove the content of a single revision.
702 693
703 694 The specified ``node`` will have its content purged from storage.
704 695 Future attempts to access the revision data for this node will
705 696 result in failure.
706 697
707 698 A ``tombstone`` message can optionally be stored. This message may be
708 699 displayed to users when they attempt to access the missing revision
709 700 data.
710 701
711 702 Storage backends may have stored deltas against the previous content
712 703 in this revision. As part of censoring a revision, these storage
713 704 backends are expected to rewrite any internally stored deltas such
714 705 that they no longer reference the deleted content.
715 706 """
716 707
717 708 def getstrippoint(minlink):
718 709 """Find the minimum revision that must be stripped to strip a linkrev.
719 710
720 711 Returns a 2-tuple containing the minimum revision number and a set
721 712 of all revisions numbers that would be broken by this strip.
722 713
723 714 TODO this is highly revlog centric and should be abstracted into
724 715 a higher-level deletion API. ``repair.strip()`` relies on this.
725 716 """
726 717
727 718 def strip(minlink, transaction):
728 719 """Remove storage of items starting at a linkrev.
729 720
730 721 This uses ``getstrippoint()`` to determine the first node to remove.
731 722 Then it effectively truncates storage for all revisions after that.
732 723
733 724 TODO this is highly revlog centric and should be abstracted into a
734 725 higher-level deletion API.
735 726 """
736 727
737 728 class ifilestorage(ifileindex, ifiledata, ifilemutation):
738 729 """Complete storage interface for a single tracked file."""
739 730
740 731 def files():
741 732 """Obtain paths that are backing storage for this file.
742 733
743 734 TODO this is used heavily by verify code and there should probably
744 735 be a better API for that.
745 736 """
746 737
747 738 def storageinfo(exclusivefiles=False, sharedfiles=False,
748 739 revisionscount=False, trackedsize=False,
749 740 storedsize=False):
750 741 """Obtain information about storage for this file's data.
751 742
752 743 Returns a dict describing storage for this tracked path. The keys
753 744 in the dict map to arguments of the same. The arguments are bools
754 745 indicating whether to calculate and obtain that data.
755 746
756 747 exclusivefiles
757 748 Iterable of (vfs, path) describing files that are exclusively
758 749 used to back storage for this tracked path.
759 750
760 751 sharedfiles
761 752 Iterable of (vfs, path) describing files that are used to back
762 753 storage for this tracked path. Those files may also provide storage
763 754 for other stored entities.
764 755
765 756 revisionscount
766 757 Number of revisions available for retrieval.
767 758
768 759 trackedsize
769 760 Total size in bytes of all tracked revisions. This is a sum of the
770 761 length of the fulltext of all revisions.
771 762
772 763 storedsize
773 764 Total size in bytes used to store data for all tracked revisions.
774 765 This is commonly less than ``trackedsize`` due to internal usage
775 766 of deltas rather than fulltext revisions.
776 767
777 768 Not all storage backends may support all queries or have a reasonable
778 769 value to use. In that case, the value should be set to ``None`` and
779 770 callers are expected to handle this special value.
780 771 """
781 772
782 773 def verifyintegrity(state):
783 774 """Verifies the integrity of file storage.
784 775
785 776 ``state`` is a dict holding state of the verifier process. It can be
786 777 used to communicate data between invocations of multiple storage
787 778 primitives.
788 779
789 780 If individual revisions cannot have their revision content resolved,
790 781 the method is expected to set the ``skipread`` key to a set of nodes
791 782 that encountered problems.
792 783
793 784 The method yields objects conforming to the ``iverifyproblem``
794 785 interface.
795 786 """
796 787
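# Editorial aside, not part of this change: querying storage metadata
# through storageinfo() as documented above. Keys are assumed to mirror
# the argument names; only requested values are computed, and a backend
# may report None for values it cannot compute.
def reportsizes(fl):
    info = fl.storageinfo(revisionscount=True, trackedsize=True)
    return info['revisionscount'], info['trackedsize']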
797 788 class idirs(interfaceutil.Interface):
798 789 """Interface representing a collection of directories from paths.
799 790
800 791 This interface is essentially a derived data structure representing
801 792 directories from a collection of paths.
802 793 """
803 794
804 795 def addpath(path):
805 796 """Add a path to the collection.
806 797
807 798 All directories in the path will be added to the collection.
808 799 """
809 800
810 801 def delpath(path):
811 802 """Remove a path from the collection.
812 803
813 804 If the removal was the last path in a particular directory, the
814 805 directory is removed from the collection.
815 806 """
816 807
817 808 def __iter__():
818 809 """Iterate over the directories in this collection of paths."""
819 810
820 811 def __contains__(path):
821 812 """Whether a specific directory is in this collection."""
822 813
823 814 class imanifestdict(interfaceutil.Interface):
824 815 """Interface representing a manifest data structure.
825 816
826 817 A manifest is effectively a dict mapping paths to entries. Each entry
827 818 consists of a binary node and extra flags affecting that entry.
828 819 """
829 820
830 821 def __getitem__(path):
831 822 """Returns the binary node value for a path in the manifest.
832 823
833 824 Raises ``KeyError`` if the path does not exist in the manifest.
834 825
835 826 Equivalent to ``self.find(path)[0]``.
836 827 """
837 828
838 829 def find(path):
839 830 """Returns the entry for a path in the manifest.
840 831
841 832 Returns a 2-tuple of (node, flags).
842 833
843 834 Raises ``KeyError`` if the path does not exist in the manifest.
844 835 """
845 836
846 837 def __len__():
847 838 """Return the number of entries in the manifest."""
848 839
849 840 def __nonzero__():
850 841 """Returns True if the manifest has entries, False otherwise."""
851 842
852 843 __bool__ = __nonzero__
853 844
854 845 def __setitem__(path, node):
855 846 """Define the node value for a path in the manifest.
856 847
857 848 If the path is already in the manifest, its flags will be copied to
858 849 the new entry.
859 850 """
860 851
861 852 def __contains__(path):
862 853 """Whether a path exists in the manifest."""
863 854
864 855 def __delitem__(path):
865 856 """Remove a path from the manifest.
866 857
867 858 Raises ``KeyError`` if the path is not in the manifest.
868 859 """
869 860
870 861 def __iter__():
871 862 """Iterate over paths in the manifest."""
872 863
873 864 def iterkeys():
874 865 """Iterate over paths in the manifest."""
875 866
876 867 def keys():
877 868 """Obtain a list of paths in the manifest."""
878 869
879 870 def filesnotin(other, match=None):
880 871 """Obtain the set of paths in this manifest but not in another.
881 872
882 873 ``match`` is an optional matcher function to be applied to both
883 874 manifests.
884 875
885 876 Returns a set of paths.
886 877 """
887 878
888 879 def dirs():
889 880 """Returns an object implementing the ``idirs`` interface."""
890 881
891 882 def hasdir(dir):
892 883 """Returns a bool indicating if a directory is in this manifest."""
893 884
894 885 def matches(match):
895 886 """Generate a new manifest filtered through a matcher.
896 887
897 888 Returns an object conforming to the ``imanifestdict`` interface.
898 889 """
899 890
900 891 def walk(match):
901 892 """Generator of paths in manifest satisfying a matcher.
902 893
903 894 This is equivalent to ``self.matches(match).iterkeys()`` except a new
904 895 manifest object is not created.
905 896
906 897 If the matcher has explicit files listed and they don't exist in
907 898 the manifest, ``match.bad()`` is called for each missing file.
908 899 """
909 900
910 901 def diff(other, match=None, clean=False):
911 902 """Find differences between this manifest and another.
912 903
913 904 This manifest is compared to ``other``.
914 905
915 906 If ``match`` is provided, the two manifests are filtered against this
916 907 matcher and only entries satisfying the matcher are compared.
917 908
918 909 If ``clean`` is True, unchanged files are included in the returned
919 910 object.
920 911
921 912 Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
922 913 the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
923 914 represents the node and flags for this manifest and ``(node2, flag2)``
924 915 are the same for the other manifest.
925 916 """
926 917
927 918 def setflag(path, flag):
928 919 """Set the flag value for a given path.
929 920
930 921 Raises ``KeyError`` if the path is not already in the manifest.
931 922 """
932 923
933 924 def get(path, default=None):
934 925 """Obtain the node value for a path or a default value if missing."""
935 926
936 927 def flags(path, default=''):
937 928 """Return the flags value for a path or a default value if missing."""
938 929
939 930 def copy():
940 931 """Return a copy of this manifest."""
941 932
942 933 def items():
943 934 """Returns an iterable of (path, node) for items in this manifest."""
944 935
945 936 def iteritems():
946 937 """Identical to items()."""
947 938
948 939 def iterentries():
949 940 """Returns an iterable of (path, node, flags) for this manifest.
950 941
951 942 Similar to ``iteritems()`` except items are a 3-tuple and include
952 943 flags.
953 944 """
954 945
955 946 def text():
956 947 """Obtain the raw data representation for this manifest.
957 948
958 949 Result is used to create a manifest revision.
959 950 """
960 951
961 952 def fastdelta(base, changes):
962 953 """Obtain a delta between this manifest and another given changes.
963 954
964 955 ``base`` is the raw data representation for another manifest.
965 956
966 957 ``changes`` is an iterable of ``(path, to_delete)``.
967 958
968 959 Returns a 2-tuple containing ``bytearray(self.text())`` and the
969 960 delta between ``base`` and this manifest.
970 961 """
971 962
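# Editorial aside, not part of this change: a sketch consuming the diff()
# contract documented above to list the paths whose nodes differ between
# two manifests. The helper name is illustrative.
def changedpaths(m1, m2):
    return sorted(path
                  for path, ((n1, fl1), (n2, fl2)) in m1.diff(m2).items()
                  if n1 != n2)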
972 963 class imanifestrevisionbase(interfaceutil.Interface):
973 964 """Base interface representing a single revision of a manifest.
974 965
975 966 Should not be used as a primary interface: should always be inherited
976 967 as part of a larger interface.
977 968 """
978 969
979 970 def new():
980 971 """Obtain a new manifest instance.
981 972
982 973 Returns an object conforming to the ``imanifestrevisionwritable``
983 974 interface. The instance will be associated with the same
984 975 ``imanifestlog`` collection as this instance.
985 976 """
986 977
987 978 def copy():
988 979 """Obtain a copy of this manifest instance.
989 980
990 981 Returns an object conforming to the ``imanifestrevisionwritable``
991 982 interface. The instance will be associated with the same
992 983 ``imanifestlog`` collection as this instance.
993 984 """
994 985
995 986 def read():
996 987 """Obtain the parsed manifest data structure.
997 988
998 989 The returned object conforms to the ``imanifestdict`` interface.
999 990 """
1000 991
1001 992 class imanifestrevisionstored(imanifestrevisionbase):
1002 993 """Interface representing a manifest revision committed to storage."""
1003 994
1004 995 def node():
1005 996 """The binary node for this manifest."""
1006 997
1007 998 parents = interfaceutil.Attribute(
1008 999 """List of binary nodes that are parents for this manifest revision."""
1009 1000 )
1010 1001
1011 1002 def readdelta(shallow=False):
1012 1003 """Obtain the manifest data structure representing changes from parent.
1013 1004
1014 1005 This manifest is compared to its 1st parent. A new manifest representing
1015 1006 those differences is constructed.
1016 1007
1017 1008 The returned object conforms to the ``imanifestdict`` interface.
1018 1009 """
1019 1010
1020 1011 def readfast(shallow=False):
1021 1012 """Calls either ``read()`` or ``readdelta()``.
1022 1013
1023 1014 The faster of the two options is called.
1024 1015 """
1025 1016
1026 1017 def find(key):
1027 1018 """Calls self.read().find(key)``.
1028 1019
1029 1020 Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
1030 1021 """
1031 1022
1032 1023 class imanifestrevisionwritable(imanifestrevisionbase):
1033 1024 """Interface representing a manifest revision that can be committed."""
1034 1025
1035 1026 def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
1036 1027 """Add this revision to storage.
1037 1028
1038 1029 Takes a transaction object, the changeset revision number it will
1039 1030 be associated with, its parent nodes, and lists of added and
1040 1031 removed paths.
1041 1032
1042 1033 If match is provided, storage can choose not to inspect or write out
1043 1034 items that do not match. Storage is still required to be able to provide
1044 1035 the full manifest in the future for any directories written (these
1045 1036 manifests should not be "narrowed on disk").
1046 1037
1047 1038 Returns the binary node of the created revision.
1048 1039 """
1049 1040
1050 1041 class imanifeststorage(interfaceutil.Interface):
1051 1042 """Storage interface for manifest data."""
1052 1043
1053 1044 tree = interfaceutil.Attribute(
1054 1045 """The path to the directory this manifest tracks.
1055 1046
1056 1047 The empty bytestring represents the root manifest.
1057 1048 """)
1058 1049
1059 1050 index = interfaceutil.Attribute(
1060 1051 """An ``ifilerevisionssequence`` instance.""")
1061 1052
1062 1053 indexfile = interfaceutil.Attribute(
1063 1054 """Path of revlog index file.
1064 1055
1065 1056 TODO this is revlog specific and should not be exposed.
1066 1057 """)
1067 1058
1068 1059 opener = interfaceutil.Attribute(
1069 1060 """VFS opener to use to access underlying files used for storage.
1070 1061
1071 1062 TODO this is revlog specific and should not be exposed.
1072 1063 """)
1073 1064
1074 1065 version = interfaceutil.Attribute(
1075 1066 """Revlog version number.
1076 1067
1077 1068 TODO this is revlog specific and should not be exposed.
1078 1069 """)
1079 1070
1080 1071 _generaldelta = interfaceutil.Attribute(
1081 1072 """Whether generaldelta storage is being used.
1082 1073
1083 1074 TODO this is revlog specific and should not be exposed.
1084 1075 """)
1085 1076
1086 1077 fulltextcache = interfaceutil.Attribute(
1087 1078 """Dict with cache of fulltexts.
1088 1079
1089 1080 TODO this doesn't feel appropriate for the storage interface.
1090 1081 """)
1091 1082
1092 1083 def __len__():
1093 1084 """Obtain the number of revisions stored for this manifest."""
1094 1085
1095 1086 def __iter__():
1096 1087 """Iterate over revision numbers for this manifest."""
1097 1088
1098 1089 def rev(node):
1099 1090 """Obtain the revision number given a binary node.
1100 1091
1101 1092 Raises ``error.LookupError`` if the node is not known.
1102 1093 """
1103 1094
1104 1095 def node(rev):
1105 1096 """Obtain the node value given a revision number.
1106 1097
1107 1098 Raises ``error.LookupError`` if the revision is not known.
1108 1099 """
1109 1100
1110 1101 def lookup(value):
1111 1102 """Attempt to resolve a value to a node.
1112 1103
1113 1104 Value can be a binary node, hex node, revision number, or a bytes
1114 1105 that can be converted to an integer.
1115 1106
1116 1107 Raises ``error.LookupError`` if a node could not be resolved.
1117 1108
1118 1109 TODO this is only used by debug* commands and can probably be deleted
1119 1110 easily.
1120 1111 """
1121 1112
1122 1113 def parents(node):
1123 1114 """Returns a 2-tuple of parent nodes for a node.
1124 1115
1125 1116 Values will be ``nullid`` if the parent is empty.
1126 1117 """
1127 1118
1128 1119 def parentrevs(rev):
1129 1120 """Like parents() but operates on revision numbers."""
1130 1121
1131 1122 def linkrev(rev):
1132 1123 """Obtain the changeset revision number a revision is linked to."""
1133 1124
1134 1125 def revision(node, _df=None, raw=False):
1135 1126 """Obtain fulltext data for a node."""
1136 1127
1137 1128 def revdiff(rev1, rev2):
1138 1129 """Obtain a delta between two revision numbers.
1139 1130
1140 1131 The returned data is the result of ``bdiff.bdiff()`` on the raw
1141 1132 revision data.
1142 1133 """
1143 1134
1144 1135 def cmp(node, fulltext):
1145 1136 """Compare fulltext to another revision.
1146 1137
1147 1138 Returns True if the fulltext is different from what is stored.
1148 1139 """
1149 1140
1150 1141 def emitrevisions(nodes,
1151 1142 nodesorder=None,
1152 1143 revisiondata=False,
1153 1144 assumehaveparentrevisions=False):
1154 1145 """Produce ``irevisiondelta`` describing revisions.
1155 1146
1156 1147 See the documentation for ``ifiledata`` for more.
1157 1148 """
1158 1149
1159 1150 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
1160 1151 """Process a series of deltas for storage.
1161 1152
1162 1153 See the documentation in ``ifilemutation`` for more.
1163 1154 """
1164 1155
1165 1156 def rawsize(rev):
1166 1157 """Obtain the size of tracked data.
1167 1158
1168 1159 Is equivalent to ``len(m.revision(node, raw=True))``.
1169 1160
1170 1161 TODO this method is only used by upgrade code and may be removed.
1171 1162 """
1172 1163
1173 1164 def getstrippoint(minlink):
1174 1165 """Find minimum revision that must be stripped to strip a linkrev.
1175 1166
1176 1167 See the documentation in ``ifilemutation`` for more.
1177 1168 """
1178 1169
1179 1170 def strip(minlink, transaction):
1180 1171 """Remove storage of items starting at a linkrev.
1181 1172
1182 1173 See the documentation in ``ifilemutation`` for more.
1183 1174 """
1184 1175
1185 1176 def checksize():
1186 1177 """Obtain the expected sizes of backing files.
1187 1178
1188 1179 TODO this is used by verify and it should not be part of the interface.
1189 1180 """
1190 1181
1191 1182 def files():
1192 1183 """Obtain paths that are backing storage for this manifest.
1193 1184
1194 1185 TODO this is used by verify and there should probably be a better API
1195 1186 for this functionality.
1196 1187 """
1197 1188
1198 1189 def deltaparent(rev):
1199 1190 """Obtain the revision that a revision is delta'd against.
1200 1191
1201 1192 TODO delta encoding is an implementation detail of storage and should
1202 1193 not be exposed to the storage interface.
1203 1194 """
1204 1195
1205 1196 def clone(tr, dest, **kwargs):
1206 1197 """Clone this instance to another."""
1207 1198
1208 1199 def clearcaches(clear_persisted_data=False):
1209 1200 """Clear any caches associated with this instance."""
1210 1201
1211 1202 def dirlog(d):
1212 1203 """Obtain a manifest storage instance for a tree."""
1213 1204
1214 1205 def add(m, transaction, link, p1, p2, added, removed, readtree=None,
1215 1206 match=None):
1216 1207 """Add a revision to storage.
1217 1208
1218 1209 ``m`` is an object conforming to ``imanifestdict``.
1219 1210
1220 1211 ``link`` is the linkrev revision number.
1221 1212
1222 1213 ``p1`` and ``p2`` are the parent revision numbers.
1223 1214
1224 1215 ``added`` and ``removed`` are iterables of added and removed paths,
1225 1216 respectively.
1226 1217
1227 1218 ``readtree`` is a function that can be used to read the child tree(s)
1228 1219 when recursively writing the full tree structure when using
1229 1220 treemanifests.
1230 1221
1231 1222 ``match`` is a matcher that can be used to hint to storage that not all
1232 1223 paths must be inspected; this is an optimization and can be safely
1233 1224 ignored. Note that the storage must still be able to reproduce a full
1234 1225 manifest including files that did not match.
1235 1226 """
1236 1227
1237 1228 def storageinfo(exclusivefiles=False, sharedfiles=False,
1238 1229 revisionscount=False, trackedsize=False,
1239 1230 storedsize=False):
1240 1231 """Obtain information about storage for this manifest's data.
1241 1232
1242 1233 See ``ifilestorage.storageinfo()`` for a description of this method.
1243 1234 This one behaves the same way, except for manifest data.
1244 1235 """
1245 1236
1246 1237 class imanifestlog(interfaceutil.Interface):
1247 1238 """Interface representing a collection of manifest snapshots.
1248 1239
1249 1240 Represents the root manifest in a repository.
1250 1241
1251 1242 Also serves as a means to access nested tree manifests and to cache
1252 1243 tree manifests.
1253 1244 """
1254 1245
1255 1246 def __getitem__(node):
1256 1247 """Obtain a manifest instance for a given binary node.
1257 1248
1258 1249 Equivalent to calling ``self.get('', node)``.
1259 1250
1260 1251 The returned object conforms to the ``imanifestrevisionstored``
1261 1252 interface.
1262 1253 """
1263 1254
1264 1255 def get(tree, node, verify=True):
1265 1256 """Retrieve the manifest instance for a given directory and binary node.
1266 1257
1267 1258 ``node`` always refers to the node of the root manifest (which will be
1268 1259 the only manifest if flat manifests are being used).
1269 1260
1270 1261 If ``tree`` is the empty string, the root manifest is returned.
1271 1262 Otherwise the manifest for the specified directory will be returned
1272 1263 (requires tree manifests).
1273 1264
1274 1265 If ``verify`` is True, ``LookupError`` is raised if the node is not
1275 1266 known.
1276 1267
1277 1268 The returned object conforms to the ``imanifestrevisionstored``
1278 1269 interface.
1279 1270 """
1280 1271
1281 1272 def getstorage(tree):
1282 1273 """Retrieve an interface to storage for a particular tree.
1283 1274
1284 1275 If ``tree`` is the empty bytestring, storage for the root manifest will
1285 1276 be returned. Otherwise storage for a tree manifest is returned.
1286 1277
1287 1278 TODO formalize interface for returned object.
1288 1279 """
1289 1280
1290 1281 def clearcaches():
1291 1282 """Clear caches associated with this collection."""
1292 1283
1293 1284 def rev(node):
1294 1285 """Obtain the revision number for a binary node.
1295 1286
1296 1287 Raises ``error.LookupError`` if the node is not known.
1297 1288 """
1298 1289
1299 1290 class ilocalrepositoryfilestorage(interfaceutil.Interface):
1300 1291 """Local repository sub-interface providing access to tracked file storage.
1301 1292
1302 1293 This interface defines how a repository accesses storage for a single
1303 1294 tracked file path.
1304 1295 """
1305 1296
1306 1297 def file(f):
1307 1298 """Obtain a filelog for a tracked path.
1308 1299
1309 1300 The returned type conforms to the ``ifilestorage`` interface.
1310 1301 """
1311 1302
1312 1303 class ilocalrepositorymain(interfaceutil.Interface):
1313 1304 """Main interface for local repositories.
1314 1305
1315 1306 This currently captures the reality of things - not how things should be.
1316 1307 """
1317 1308
1318 1309 supportedformats = interfaceutil.Attribute(
1319 1310 """Set of requirements that apply to stream clone.
1320 1311
1321 1312 This is actually a class attribute and is shared among all instances.
1322 1313 """)
1323 1314
1324 1315 supported = interfaceutil.Attribute(
1325 1316 """Set of requirements that this repo is capable of opening.""")
1326 1317
1327 1318 requirements = interfaceutil.Attribute(
1328 1319 """Set of requirements this repo uses.""")
1329 1320
1330 1321 features = interfaceutil.Attribute(
1331 1322 """Set of "features" this repository supports.
1332 1323
1333 1324 A "feature" is a loosely-defined term. It can refer to a feature
1334 1325 in the classical sense or can describe an implementation detail
1335 1326 of the repository. For example, a ``readonly`` feature may denote
1336 1327 the repository as read-only. Or a ``revlogfilestore`` feature may
1337 1328 denote that the repository is using revlogs for file storage.
1338 1329
1339 1330 The intent of features is to provide a machine-queryable mechanism
1340 1331 for repo consumers to test for various repository characteristics.
1341 1332
1342 1333 Features are similar to ``requirements``. The main difference is that
1343 1334 requirements are stored on-disk and represent requirements to open the
1344 1335 repository. Features describe run-time capabilities of the repository
1345 1336 and can be more granular (some may be derived from requirements).
1346 1337 """)
1347 1338
1348 1339 filtername = interfaceutil.Attribute(
1349 1340 """Name of the repoview that is active on this repo.""")
1350 1341
1351 1342 wvfs = interfaceutil.Attribute(
1352 1343 """VFS used to access the working directory.""")
1353 1344
1354 1345 vfs = interfaceutil.Attribute(
1355 1346 """VFS rooted at the .hg directory.
1356 1347
1357 1348 Used to access repository data not in the store.
1358 1349 """)
1359 1350
1360 1351 svfs = interfaceutil.Attribute(
1361 1352 """VFS rooted at the store.
1362 1353
1363 1354 Used to access repository data in the store. Typically .hg/store.
1364 1355 But can point elsewhere if the store is shared.
1365 1356 """)
1366 1357
1367 1358 root = interfaceutil.Attribute(
1368 1359 """Path to the root of the working directory.""")
1369 1360
1370 1361 path = interfaceutil.Attribute(
1371 1362 """Path to the .hg directory.""")
1372 1363
1373 1364 origroot = interfaceutil.Attribute(
1374 1365 """The filesystem path that was used to construct the repo.""")
1375 1366
1376 1367 auditor = interfaceutil.Attribute(
1377 1368 """A pathauditor for the working directory.
1378 1369
1379 1370 This checks if a path refers to a nested repository.
1380 1371
1381 1372 Operates on the filesystem.
1382 1373 """)
1383 1374
1384 1375 nofsauditor = interfaceutil.Attribute(
1385 1376 """A pathauditor for the working directory.
1386 1377
1387 1378 This is like ``auditor`` except it doesn't do filesystem checks.
1388 1379 """)
1389 1380
1390 1381 baseui = interfaceutil.Attribute(
1391 1382 """Original ui instance passed into constructor.""")
1392 1383
1393 1384 ui = interfaceutil.Attribute(
1394 1385 """Main ui instance for this instance.""")
1395 1386
1396 1387 sharedpath = interfaceutil.Attribute(
1397 1388 """Path to the .hg directory of the repo this repo was shared from.""")
1398 1389
1399 1390 store = interfaceutil.Attribute(
1400 1391 """A store instance.""")
1401 1392
1402 1393 spath = interfaceutil.Attribute(
1403 1394 """Path to the store.""")
1404 1395
1405 1396 sjoin = interfaceutil.Attribute(
1406 1397 """Alias to self.store.join.""")
1407 1398
1408 1399 cachevfs = interfaceutil.Attribute(
1409 1400 """A VFS used to access the cache directory.
1410 1401
1411 1402 Typically .hg/cache.
1412 1403 """)
1413 1404
1414 1405 filteredrevcache = interfaceutil.Attribute(
1415 1406 """Holds sets of revisions to be filtered.""")
1416 1407
1417 1408 names = interfaceutil.Attribute(
1418 1409 """A ``namespaces`` instance.""")
1419 1410
1420 1411 def close():
1421 1412 """Close the handle on this repository."""
1422 1413
1423 1414 def peer():
1424 1415 """Obtain an object conforming to the ``peer`` interface."""
1425 1416
1426 1417 def unfiltered():
1427 1418 """Obtain an unfiltered/raw view of this repo."""
1428 1419
1429 1420 def filtered(name, visibilityexceptions=None):
1430 1421 """Obtain a named view of this repository."""
1431 1422
1432 1423 obsstore = interfaceutil.Attribute(
1433 1424 """A store of obsolescence data.""")
1434 1425
1435 1426 changelog = interfaceutil.Attribute(
1436 1427 """A handle on the changelog revlog.""")
1437 1428
1438 1429 manifestlog = interfaceutil.Attribute(
1439 1430 """An instance conforming to the ``imanifestlog`` interface.
1440 1431
1441 1432 Provides access to manifests for the repository.
1442 1433 """)
1443 1434
1444 1435 dirstate = interfaceutil.Attribute(
1445 1436 """Working directory state.""")
1446 1437
1447 1438 narrowpats = interfaceutil.Attribute(
1448 1439 """Matcher patterns for this repository's narrowspec.""")
1449 1440
1450 1441 def narrowmatch():
1451 1442 """Obtain a matcher for the narrowspec."""
1452 1443
1453 1444 def setnarrowpats(newincludes, newexcludes):
1454 1445 """Define the narrowspec for this repository."""
1455 1446
1456 1447 def __getitem__(changeid):
1457 1448 """Try to resolve a changectx."""
1458 1449
1459 1450 def __contains__(changeid):
1460 1451 """Whether a changeset exists."""
1461 1452
1462 1453 def __nonzero__():
1463 1454 """Always returns True."""
1464 1455 return True
1465 1456
1466 1457 __bool__ = __nonzero__
1467 1458
1468 1459 def __len__():
1469 1460 """Returns the number of changesets in the repo."""
1470 1461
1471 1462 def __iter__():
1472 1463 """Iterate over revisions in the changelog."""
1473 1464
1474 1465 def revs(expr, *args):
1475 1466 """Evaluate a revset.
1476 1467
1477 1468 Emits revisions.
1478 1469 """
1479 1470
1480 1471 def set(expr, *args):
1481 1472 """Evaluate a revset.
1482 1473
1483 1474 Emits changectx instances.
1484 1475 """
1485 1476
1486 1477 def anyrevs(specs, user=False, localalias=None):
1487 1478 """Find revisions matching one of the given revsets."""
1488 1479
1489 1480 def url():
1490 1481 """Returns a string representing the location of this repo."""
1491 1482
1492 1483 def hook(name, throw=False, **args):
1493 1484 """Call a hook."""
1494 1485
1495 1486 def tags():
1496 1487 """Return a mapping of tag to node."""
1497 1488
1498 1489 def tagtype(tagname):
1499 1490 """Return the type of a given tag."""
1500 1491
1501 1492 def tagslist():
1502 1493 """Return a list of tags ordered by revision."""
1503 1494
1504 1495 def nodetags(node):
1505 1496 """Return the tags associated with a node."""
1506 1497
1507 1498 def nodebookmarks(node):
1508 1499 """Return the list of bookmarks pointing to the specified node."""
1509 1500
1510 1501 def branchmap():
1511 1502 """Return a mapping of branch to heads in that branch."""
1512 1503
1513 1504 def revbranchcache():
1514 1505 """Return the revision-branch cache instance."""
1515 1506
1516 1507 def branchtip(branch, ignoremissing=False):
1517 1508 """Return the tip node for a given branch."""
1518 1509
1519 1510 def lookup(key):
1520 1511 """Resolve the node for a revision."""
1521 1512
1522 1513 def lookupbranch(key):
1523 1514 """Look up the branch name of the given revision or branch name."""
1524 1515
1525 1516 def known(nodes):
1526 1517 """Determine whether a series of nodes is known.
1527 1518
1528 1519 Returns a list of bools.
1529 1520 """
1530 1521
1531 1522 def local():
1532 1523 """Whether the repository is local."""
1533 1524 return True
1534 1525
1535 1526 def publishing():
1536 1527 """Whether the repository is a publishing repository."""
1537 1528
1538 1529 def cancopy():
1539 1530 pass
1540 1531
1541 1532 def shared():
1542 1533 """The type of shared repository or None."""
1543 1534
1544 1535 def wjoin(f, *insidef):
1545 1536 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
1546 1537
1547 1538 def setparents(p1, p2):
1548 1539 """Set the parent nodes of the working directory."""
1549 1540
1550 1541 def filectx(path, changeid=None, fileid=None):
1551 1542 """Obtain a filectx for the given file revision."""
1552 1543
1553 1544 def getcwd():
1554 1545 """Obtain the current working directory from the dirstate."""
1555 1546
1556 1547 def pathto(f, cwd=None):
1557 1548 """Obtain the relative path to a file."""
1558 1549
1559 1550 def adddatafilter(name, fltr):
1560 1551 pass
1561 1552
1562 1553 def wread(filename):
1563 1554 """Read a file from wvfs, using data filters."""
1564 1555
1565 1556 def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
1566 1557 """Write data to a file in the wvfs, using data filters."""
1567 1558
1568 1559 def wwritedata(filename, data):
1569 1560 """Resolve data for writing to the wvfs, using data filters."""
1570 1561
1571 1562 def currenttransaction():
1572 1563 """Obtain the current transaction instance or None."""
1573 1564
1574 1565 def transaction(desc, report=None):
1575 1566 """Open a new transaction to write to the repository."""
1576 1567
1577 1568 def undofiles():
1578 1569 """Returns a list of (vfs, path) for files to undo transactions."""
1579 1570
1580 1571 def recover():
1581 1572 """Roll back an interrupted transaction."""
1582 1573
1583 1574 def rollback(dryrun=False, force=False):
1584 1575 """Undo the last transaction.
1585 1576
1586 1577 DANGEROUS.
1587 1578 """
1588 1579
1589 1580 def updatecaches(tr=None, full=False):
1590 1581 """Warm repo caches."""
1591 1582
1592 1583 def invalidatecaches():
1593 1584 """Invalidate cached data due to the repository mutating."""
1594 1585
1595 1586 def invalidatevolatilesets():
1596 1587 pass
1597 1588
1598 1589 def invalidatedirstate():
1599 1590 """Invalidate the dirstate."""
1600 1591
1601 1592 def invalidate(clearfilecache=False):
1602 1593 pass
1603 1594
1604 1595 def invalidateall():
1605 1596 pass
1606 1597
1607 1598 def lock(wait=True):
1608 1599 """Lock the repository store and return a lock instance."""
1609 1600
1610 1601 def wlock(wait=True):
1611 1602 """Lock the non-store parts of the repository."""
1612 1603
1613 1604 def currentwlock():
1614 1605 """Return the wlock if it's held or None."""
1615 1606
1616 1607 def checkcommitpatterns(wctx, vdirs, match, status, fail):
1617 1608 pass
1618 1609
1619 1610 def commit(text='', user=None, date=None, match=None, force=False,
1620 1611 editor=False, extra=None):
1621 1612 """Add a new revision to the repository."""
1622 1613
1623 1614 def commitctx(ctx, error=False):
1624 1615 """Commit a commitctx instance to the repository."""
1625 1616
1626 1617 def destroying():
1627 1618 """Inform the repository that nodes are about to be destroyed."""
1628 1619
1629 1620 def destroyed():
1630 1621 """Inform the repository that nodes have been destroyed."""
1631 1622
1632 1623 def status(node1='.', node2=None, match=None, ignored=False,
1633 1624 clean=False, unknown=False, listsubrepos=False):
1634 1625 """Convenience method to call repo[x].status()."""
1635 1626
1636 1627 def addpostdsstatus(ps):
1637 1628 pass
1638 1629
1639 1630 def postdsstatus():
1640 1631 pass
1641 1632
1642 1633 def clearpostdsstatus():
1643 1634 pass
1644 1635
1645 1636 def heads(start=None):
1646 1637 """Obtain list of nodes that are DAG heads."""
1647 1638
1648 1639 def branchheads(branch=None, start=None, closed=False):
1649 1640 pass
1650 1641
1651 1642 def branches(nodes):
1652 1643 pass
1653 1644
1654 1645 def between(pairs):
1655 1646 pass
1656 1647
1657 1648 def checkpush(pushop):
1658 1649 pass
1659 1650
1660 1651 prepushoutgoinghooks = interfaceutil.Attribute(
1661 1652 """util.hooks instance.""")
1662 1653
1663 1654 def pushkey(namespace, key, old, new):
1664 1655 """Update a value in a pushkey namespace (e.g. bookmarks, phases)."""
1665 1656
1666 1657 def listkeys(namespace):
1667 1658 """Return the values in a pushkey namespace."""
1668 1659
1669 1660 def debugwireargs(one, two, three=None, four=None, five=None):
1670 1661 pass
1671 1662
1672 1663 def savecommitmessage(text):
1673 1664 pass
1674 1665
1675 1666 class completelocalrepository(ilocalrepositorymain,
1676 1667 ilocalrepositoryfilestorage):
1677 1668 """Complete interface for a local repository."""
@@ -1,1060 +1,1033
1 1 # storage.py - Testing of storage primitives.
2 2 #
3 3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import unittest
11 11
12 12 from ..node import (
13 13 hex,
14 14 nullid,
15 15 nullrev,
16 16 )
17 17 from .. import (
18 18 error,
19 19 mdiff,
20 20 revlog,
21 21 )
22 22 from ..utils import (
23 23 storageutil,
24 24 )
25 25
26 26 class basetestcase(unittest.TestCase):
27 27 if not getattr(unittest.TestCase, r'assertRaisesRegex', False):
28 28 assertRaisesRegex = (# camelcase-required
29 29 unittest.TestCase.assertRaisesRegexp)
30 30
31 31 class ifileindextests(basetestcase):
32 32 """Generic tests for the ifileindex interface.
33 33
34 34 All file storage backends for index data should conform to the tests in this
35 35 class.
36 36
37 37 Use ``makeifileindextests()`` to create an instance of this type.
38 38 """
39 39 def testempty(self):
40 40 f = self._makefilefn()
41 41 self.assertEqual(len(f), 0, 'new file store has 0 length by default')
42 42 self.assertEqual(list(f), [], 'iter yields nothing by default')
43 43
44 44 gen = iter(f)
45 45 with self.assertRaises(StopIteration):
46 46 next(gen)
47 47
48 48 # revs() should evaluate to an empty list.
49 49 self.assertEqual(list(f.revs()), [])
50 50
51 51 revs = iter(f.revs())
52 52 with self.assertRaises(StopIteration):
53 53 next(revs)
54 54
55 55 self.assertEqual(list(f.revs(start=20)), [])
56 56
57 57 # parents() and parentrevs() work with nullid/nullrev.
58 58 self.assertEqual(f.parents(nullid), (nullid, nullid))
59 59 self.assertEqual(f.parentrevs(nullrev), (nullrev, nullrev))
60 60
61 61 with self.assertRaises(error.LookupError):
62 62 f.parents(b'\x01' * 20)
63 63
64 64 for i in range(-5, 5):
65 65 if i == nullrev:
66 66 continue
67 67
68 68 with self.assertRaises(IndexError):
69 69 f.parentrevs(i)
70 70
71 71 # nullid/nullrev lookup always works.
72 72 self.assertEqual(f.rev(nullid), nullrev)
73 73 self.assertEqual(f.node(nullrev), nullid)
74 74
75 75 with self.assertRaises(error.LookupError):
76 76 f.rev(b'\x01' * 20)
77 77
78 78 for i in range(-5, 5):
79 79 if i == nullrev:
80 80 continue
81 81
82 82 with self.assertRaises(IndexError):
83 83 f.node(i)
84 84
85 85 self.assertEqual(f.lookup(nullid), nullid)
86 86 self.assertEqual(f.lookup(nullrev), nullid)
87 87 self.assertEqual(f.lookup(hex(nullid)), nullid)
88 88
89 89 # String converted to integer doesn't work for nullrev.
90 90 with self.assertRaises(error.LookupError):
91 91 f.lookup(b'%d' % nullrev)
92 92
93 93 self.assertEqual(f.linkrev(nullrev), nullrev)
94 94
95 95 for i in range(-5, 5):
96 96 if i == nullrev:
97 97 continue
98 98
99 99 with self.assertRaises(IndexError):
100 100 f.linkrev(i)
101 101
102 102 self.assertFalse(f.iscensored(nullrev))
103 103
104 104 for i in range(-5, 5):
105 105 if i == nullrev:
106 106 continue
107 107
108 108 with self.assertRaises(IndexError):
109 109 f.iscensored(i)
110 110
111 111 self.assertEqual(list(f.commonancestorsheads(nullid, nullid)), [])
112 112
113 113 with self.assertRaises(ValueError):
114 114 self.assertEqual(list(f.descendants([])), [])
115 115
116 116 self.assertEqual(list(f.descendants([nullrev])), [])
117 117
118 118 self.assertEqual(f.heads(), [nullid])
119 119 self.assertEqual(f.heads(nullid), [nullid])
120 120 self.assertEqual(f.heads(None, [nullid]), [nullid])
121 121 self.assertEqual(f.heads(nullid, [nullid]), [nullid])
122 122
123 123 self.assertEqual(f.children(nullid), [])
124 124
125 125 with self.assertRaises(error.LookupError):
126 126 f.children(b'\x01' * 20)
127 127
128 128 def testsinglerevision(self):
129 129 f = self._makefilefn()
130 130 with self._maketransactionfn() as tr:
131 131 node = f.add(b'initial', None, tr, 0, nullid, nullid)
132 132
133 133 self.assertEqual(len(f), 1)
134 134 self.assertEqual(list(f), [0])
135 135
136 136 gen = iter(f)
137 137 self.assertEqual(next(gen), 0)
138 138
139 139 with self.assertRaises(StopIteration):
140 140 next(gen)
141 141
142 142 self.assertEqual(list(f.revs()), [0])
143 143 self.assertEqual(list(f.revs(start=1)), [])
144 144 self.assertEqual(list(f.revs(start=0)), [0])
145 145 self.assertEqual(list(f.revs(stop=0)), [0])
146 146 self.assertEqual(list(f.revs(stop=1)), [0])
147 147 self.assertEqual(list(f.revs(1, 1)), [])
148 148 # TODO buggy
149 149 self.assertEqual(list(f.revs(1, 0)), [1, 0])
150 150 self.assertEqual(list(f.revs(2, 0)), [2, 1, 0])
151 151
152 152 self.assertEqual(f.parents(node), (nullid, nullid))
153 153 self.assertEqual(f.parentrevs(0), (nullrev, nullrev))
154 154
155 155 with self.assertRaises(error.LookupError):
156 156 f.parents(b'\x01' * 20)
157 157
158 158 with self.assertRaises(IndexError):
159 159 f.parentrevs(1)
160 160
161 161 self.assertEqual(f.rev(node), 0)
162 162
163 163 with self.assertRaises(error.LookupError):
164 164 f.rev(b'\x01' * 20)
165 165
166 166 self.assertEqual(f.node(0), node)
167 167
168 168 with self.assertRaises(IndexError):
169 169 f.node(1)
170 170
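# lookup() accepts several input forms -- a binary node, an integer
# revision, the revision number as bytes, and the hex-encoded node -- all
# resolving to the same node: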
171 171 self.assertEqual(f.lookup(node), node)
172 172 self.assertEqual(f.lookup(0), node)
173 173 self.assertEqual(f.lookup(b'0'), node)
174 174 self.assertEqual(f.lookup(hex(node)), node)
175 175
176 176 self.assertEqual(f.linkrev(0), 0)
177 177
178 178 with self.assertRaises(IndexError):
179 179 f.linkrev(1)
180 180
181 181 self.assertFalse(f.iscensored(0))
182 182
183 183 with self.assertRaises(IndexError):
184 184 f.iscensored(1)
185 185
186 186 self.assertEqual(list(f.descendants([0])), [])
187 187
188 188 self.assertEqual(f.heads(), [node])
189 189 self.assertEqual(f.heads(node), [node])
190 190 self.assertEqual(f.heads(stop=[node]), [node])
191 191
192 192 with self.assertRaises(error.LookupError):
193 193 f.heads(stop=[b'\x01' * 20])
194 194
195 195 self.assertEqual(f.children(node), [])
196 196
197 197 def testmultiplerevisions(self):
198 198 fulltext0 = b'x' * 1024
199 199 fulltext1 = fulltext0 + b'y'
200 200 fulltext2 = b'y' + fulltext0 + b'z'
201 201
202 202 f = self._makefilefn()
203 203 with self._maketransactionfn() as tr:
204 204 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
205 205 node1 = f.add(fulltext1, None, tr, 1, node0, nullid)
206 206 node2 = f.add(fulltext2, None, tr, 3, node1, nullid)
207 207
208 208 self.assertEqual(len(f), 3)
209 209 self.assertEqual(list(f), [0, 1, 2])
210 210
211 211 gen = iter(f)
212 212 self.assertEqual(next(gen), 0)
213 213 self.assertEqual(next(gen), 1)
214 214 self.assertEqual(next(gen), 2)
215 215
216 216 with self.assertRaises(StopIteration):
217 217 next(gen)
218 218
219 219 self.assertEqual(list(f.revs()), [0, 1, 2])
220 220 self.assertEqual(list(f.revs(0)), [0, 1, 2])
221 221 self.assertEqual(list(f.revs(1)), [1, 2])
222 222 self.assertEqual(list(f.revs(2)), [2])
223 223 self.assertEqual(list(f.revs(3)), [])
224 224 self.assertEqual(list(f.revs(stop=1)), [0, 1])
225 225 self.assertEqual(list(f.revs(stop=2)), [0, 1, 2])
226 226 self.assertEqual(list(f.revs(stop=3)), [0, 1, 2])
227 227 self.assertEqual(list(f.revs(2, 0)), [2, 1, 0])
228 228 self.assertEqual(list(f.revs(2, 1)), [2, 1])
229 229 # TODO this is wrong
230 230 self.assertEqual(list(f.revs(3, 2)), [3, 2])
231 231
232 232 self.assertEqual(f.parents(node0), (nullid, nullid))
233 233 self.assertEqual(f.parents(node1), (node0, nullid))
234 234 self.assertEqual(f.parents(node2), (node1, nullid))
235 235
236 236 self.assertEqual(f.parentrevs(0), (nullrev, nullrev))
237 237 self.assertEqual(f.parentrevs(1), (0, nullrev))
238 238 self.assertEqual(f.parentrevs(2), (1, nullrev))
239 239
240 240 self.assertEqual(f.rev(node0), 0)
241 241 self.assertEqual(f.rev(node1), 1)
242 242 self.assertEqual(f.rev(node2), 2)
243 243
244 244 with self.assertRaises(error.LookupError):
245 245 f.rev(b'\x01' * 20)
246 246
247 247 self.assertEqual(f.node(0), node0)
248 248 self.assertEqual(f.node(1), node1)
249 249 self.assertEqual(f.node(2), node2)
250 250
251 251 with self.assertRaises(IndexError):
252 252 f.node(3)
253 253
254 254 self.assertEqual(f.lookup(node0), node0)
255 255 self.assertEqual(f.lookup(0), node0)
256 256 self.assertEqual(f.lookup(b'0'), node0)
257 257 self.assertEqual(f.lookup(hex(node0)), node0)
258 258
259 259 self.assertEqual(f.lookup(node1), node1)
260 260 self.assertEqual(f.lookup(1), node1)
261 261 self.assertEqual(f.lookup(b'1'), node1)
262 262 self.assertEqual(f.lookup(hex(node1)), node1)
263 263
264 264 self.assertEqual(f.linkrev(0), 0)
265 265 self.assertEqual(f.linkrev(1), 1)
266 266 self.assertEqual(f.linkrev(2), 3)
267 267
268 268 with self.assertRaises(IndexError):
269 269 f.linkrev(3)
270 270
271 271 self.assertFalse(f.iscensored(0))
272 272 self.assertFalse(f.iscensored(1))
273 273 self.assertFalse(f.iscensored(2))
274 274
275 275 with self.assertRaises(IndexError):
276 276 f.iscensored(3)
277 277
278 278 self.assertEqual(f.commonancestorsheads(node1, nullid), [])
279 279 self.assertEqual(f.commonancestorsheads(node1, node0), [node0])
280 280 self.assertEqual(f.commonancestorsheads(node1, node1), [node1])
281 281 self.assertEqual(f.commonancestorsheads(node0, node1), [node0])
282 282 self.assertEqual(f.commonancestorsheads(node1, node2), [node1])
283 283 self.assertEqual(f.commonancestorsheads(node2, node1), [node1])
284 284
285 285 self.assertEqual(list(f.descendants([0])), [1, 2])
286 286 self.assertEqual(list(f.descendants([1])), [2])
287 287 self.assertEqual(list(f.descendants([0, 1])), [1, 2])
288 288
289 289 self.assertEqual(f.heads(), [node2])
290 290 self.assertEqual(f.heads(node0), [node2])
291 291 self.assertEqual(f.heads(node1), [node2])
292 292 self.assertEqual(f.heads(node2), [node2])
293 293
294 294 # TODO this behavior seems wonky. Is it correct? If so, the
295 295 # docstring for heads() should be updated to reflect desired
296 296 # behavior.
297 297 self.assertEqual(f.heads(stop=[node1]), [node1, node2])
298 298 self.assertEqual(f.heads(stop=[node0]), [node0, node2])
299 299 self.assertEqual(f.heads(stop=[node1, node2]), [node1, node2])
300 300
301 301 with self.assertRaises(error.LookupError):
302 302 f.heads(stop=[b'\x01' * 20])
303 303
304 304 self.assertEqual(f.children(node0), [node1])
305 305 self.assertEqual(f.children(node1), [node2])
306 306 self.assertEqual(f.children(node2), [])
307 307
308 308 def testmultipleheads(self):
309 309 f = self._makefilefn()
310 310
311 311 with self._maketransactionfn() as tr:
312 312 node0 = f.add(b'0', None, tr, 0, nullid, nullid)
313 313 node1 = f.add(b'1', None, tr, 1, node0, nullid)
314 314 node2 = f.add(b'2', None, tr, 2, node1, nullid)
315 315 node3 = f.add(b'3', None, tr, 3, node0, nullid)
316 316 node4 = f.add(b'4', None, tr, 4, node3, nullid)
317 317 node5 = f.add(b'5', None, tr, 5, node0, nullid)
318 318
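# The adds above build this DAG (heads: node2, node4, node5):
#
#   0 -- 1 -- 2
#    \-- 3 -- 4
#     \-- 5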
319 319 self.assertEqual(len(f), 6)
320 320
321 321 self.assertEqual(list(f.descendants([0])), [1, 2, 3, 4, 5])
322 322 self.assertEqual(list(f.descendants([1])), [2])
323 323 self.assertEqual(list(f.descendants([2])), [])
324 324 self.assertEqual(list(f.descendants([3])), [4])
325 325 self.assertEqual(list(f.descendants([0, 1])), [1, 2, 3, 4, 5])
326 326 self.assertEqual(list(f.descendants([1, 3])), [2, 4])
327 327
328 328 self.assertEqual(f.heads(), [node2, node4, node5])
329 329 self.assertEqual(f.heads(node0), [node2, node4, node5])
330 330 self.assertEqual(f.heads(node1), [node2])
331 331 self.assertEqual(f.heads(node2), [node2])
332 332 self.assertEqual(f.heads(node3), [node4])
333 333 self.assertEqual(f.heads(node4), [node4])
334 334 self.assertEqual(f.heads(node5), [node5])
335 335
336 336 # TODO this seems wrong.
337 337 self.assertEqual(f.heads(stop=[node0]), [node0, node2, node4, node5])
338 338 self.assertEqual(f.heads(stop=[node1]), [node1, node2, node4, node5])
339 339
340 340 self.assertEqual(f.children(node0), [node1, node3, node5])
341 341 self.assertEqual(f.children(node1), [node2])
342 342 self.assertEqual(f.children(node2), [])
343 343 self.assertEqual(f.children(node3), [node4])
344 344 self.assertEqual(f.children(node4), [])
345 345 self.assertEqual(f.children(node5), [])
346 346
347 347 class ifiledatatests(basetestcase):
348 348 """Generic tests for the ifiledata interface.
349 349
350 350 All file storage backends for data should conform to the tests in this
351 351 class.
352 352
353 353 Use ``makeifiledatatests()`` to create an instance of this type.
354 354 """
355 355 def testempty(self):
356 356 f = self._makefilefn()
357 357
358 358 self.assertEqual(f.storageinfo(), {})
359 359 self.assertEqual(f.storageinfo(revisionscount=True, trackedsize=True),
360 360 {'revisionscount': 0, 'trackedsize': 0})
361 361
362 362 self.assertEqual(f.size(nullrev), 0)
363 363
364 364 for i in range(-5, 5):
365 365 if i == nullrev:
366 366 continue
367 367
368 368 with self.assertRaises(IndexError):
369 369 f.size(i)
370 370
371 371 with self.assertRaises(error.StorageError):
372 372 f.checkhash(b'', nullid)
373 373
374 374 with self.assertRaises(error.LookupError):
375 375 f.checkhash(b'', b'\x01' * 20)
376 376
377 377 self.assertEqual(f.revision(nullid), b'')
378 378 self.assertEqual(f.revision(nullid, raw=True), b'')
379 379
380 380 with self.assertRaises(error.LookupError):
381 381 f.revision(b'\x01' * 20)
382 382
383 383 self.assertEqual(f.read(nullid), b'')
384 384
385 385 with self.assertRaises(error.LookupError):
386 386 f.read(b'\x01' * 20)
387 387
388 388 self.assertFalse(f.renamed(nullid))
389 389
390 390 with self.assertRaises(error.LookupError):
391 391 f.read(b'\x01' * 20)
392 392
393 393 self.assertTrue(f.cmp(nullid, b''))
394 394 self.assertTrue(f.cmp(nullid, b'foo'))
395 395
396 396 with self.assertRaises(error.LookupError):
397 397 f.cmp(b'\x01' * 20, b'irrelevant')
398 398
399 self.assertEqual(f.revdiff(nullrev, nullrev), b'')
400
401 with self.assertRaises(IndexError):
402 f.revdiff(0, nullrev)
403
404 with self.assertRaises(IndexError):
405 f.revdiff(nullrev, 0)
406
407 with self.assertRaises(IndexError):
408 f.revdiff(0, 0)
409
410 399 # Emitting an empty list produces an empty generator.
411 400 gen = f.emitrevisions([])
412 401 with self.assertRaises(StopIteration):
413 402 next(gen)
414 403
415 404 # Emitting null node yields nothing.
416 405 gen = f.emitrevisions([nullid])
417 406 with self.assertRaises(StopIteration):
418 407 next(gen)
419 408
420 409 # Requesting unknown node fails.
421 410 with self.assertRaises(error.LookupError):
422 411 list(f.emitrevisions([b'\x01' * 20]))
423 412
424 413 def testsinglerevision(self):
425 414 fulltext = b'initial'
426 415
427 416 f = self._makefilefn()
428 417 with self._maketransactionfn() as tr:
429 418 node = f.add(fulltext, None, tr, 0, nullid, nullid)
430 419
431 420 self.assertEqual(f.storageinfo(), {})
432 421 self.assertEqual(f.storageinfo(revisionscount=True, trackedsize=True),
433 422 {'revisionscount': 1, 'trackedsize': len(fulltext)})
434 423
435 424 self.assertEqual(f.size(0), len(fulltext))
436 425
437 426 with self.assertRaises(IndexError):
438 427 f.size(1)
439 428
440 429 f.checkhash(fulltext, node)
441 430 f.checkhash(fulltext, node, nullid, nullid)
442 431
443 432 with self.assertRaises(error.StorageError):
444 433 f.checkhash(fulltext + b'extra', node)
445 434
446 435 with self.assertRaises(error.StorageError):
447 436 f.checkhash(fulltext, node, b'\x01' * 20, nullid)
448 437
449 438 with self.assertRaises(error.StorageError):
450 439 f.checkhash(fulltext, node, nullid, b'\x01' * 20)
451 440
452 441 self.assertEqual(f.revision(node), fulltext)
453 442 self.assertEqual(f.revision(node, raw=True), fulltext)
454 443
455 444 self.assertEqual(f.read(node), fulltext)
456 445
457 446 self.assertFalse(f.renamed(node))
458 447
459 448 self.assertFalse(f.cmp(node, fulltext))
460 449 self.assertTrue(f.cmp(node, fulltext + b'extra'))
461 450
462 self.assertEqual(f.revdiff(0, 0), b'')
463 self.assertEqual(f.revdiff(nullrev, 0),
464 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07%s' %
465 fulltext)
466
467 self.assertEqual(f.revdiff(0, nullrev),
468 b'\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00')
469
470 451 # Emitting a single revision works.
471 452 gen = f.emitrevisions([node])
472 453 rev = next(gen)
473 454
474 455 self.assertEqual(rev.node, node)
475 456 self.assertEqual(rev.p1node, nullid)
476 457 self.assertEqual(rev.p2node, nullid)
477 458 self.assertIsNone(rev.linknode)
478 459 self.assertEqual(rev.basenode, nullid)
479 460 self.assertIsNone(rev.baserevisionsize)
480 461 self.assertIsNone(rev.revision)
481 462 self.assertIsNone(rev.delta)
482 463
483 464 with self.assertRaises(StopIteration):
484 465 next(gen)
485 466
486 467 # Requesting revision data works.
487 468 gen = f.emitrevisions([node], revisiondata=True)
488 469 rev = next(gen)
489 470
490 471 self.assertEqual(rev.node, node)
491 472 self.assertEqual(rev.p1node, nullid)
492 473 self.assertEqual(rev.p2node, nullid)
493 474 self.assertIsNone(rev.linknode)
494 475 self.assertEqual(rev.basenode, nullid)
495 476 self.assertIsNone(rev.baserevisionsize)
496 477 self.assertEqual(rev.revision, fulltext)
497 478 self.assertIsNone(rev.delta)
498 479
499 480 with self.assertRaises(StopIteration):
500 481 next(gen)
501 482
502 483 # Emitting an unknown node after a known revision results in error.
503 484 with self.assertRaises(error.LookupError):
504 485 list(f.emitrevisions([node, b'\x01' * 20]))
505 486
506 487 def testmultiplerevisions(self):
507 488 fulltext0 = b'x' * 1024
508 489 fulltext1 = fulltext0 + b'y'
509 490 fulltext2 = b'y' + fulltext0 + b'z'
510 491
511 492 f = self._makefilefn()
512 493 with self._maketransactionfn() as tr:
513 494 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
514 495 node1 = f.add(fulltext1, None, tr, 1, node0, nullid)
515 496 node2 = f.add(fulltext2, None, tr, 3, node1, nullid)
516 497
517 498 self.assertEqual(f.storageinfo(), {})
518 499 self.assertEqual(
519 500 f.storageinfo(revisionscount=True, trackedsize=True),
520 501 {
521 502 'revisionscount': 3,
522 503 'trackedsize': len(fulltext0) + len(fulltext1) + len(fulltext2),
523 504 })
524 505
525 506 self.assertEqual(f.size(0), len(fulltext0))
526 507 self.assertEqual(f.size(1), len(fulltext1))
527 508 self.assertEqual(f.size(2), len(fulltext2))
528 509
529 510 with self.assertRaises(IndexError):
530 511 f.size(3)
531 512
532 513 f.checkhash(fulltext0, node0)
533 514 f.checkhash(fulltext1, node1)
534 515 f.checkhash(fulltext1, node1, node0, nullid)
535 516 f.checkhash(fulltext2, node2, node1, nullid)
536 517
537 518 with self.assertRaises(error.StorageError):
538 519 f.checkhash(fulltext1, b'\x01' * 20)
539 520
540 521 with self.assertRaises(error.StorageError):
541 522 f.checkhash(fulltext1 + b'extra', node1, node0, nullid)
542 523
543 524 with self.assertRaises(error.StorageError):
544 525 f.checkhash(fulltext1, node1, node0, node0)
545 526
546 527 self.assertEqual(f.revision(node0), fulltext0)
547 528 self.assertEqual(f.revision(node0, raw=True), fulltext0)
548 529 self.assertEqual(f.revision(node1), fulltext1)
549 530 self.assertEqual(f.revision(node1, raw=True), fulltext1)
550 531 self.assertEqual(f.revision(node2), fulltext2)
551 532 self.assertEqual(f.revision(node2, raw=True), fulltext2)
552 533
553 534 with self.assertRaises(error.LookupError):
554 535 f.revision(b'\x01' * 20)
555 536
556 537 self.assertEqual(f.read(node0), fulltext0)
557 538 self.assertEqual(f.read(node1), fulltext1)
558 539 self.assertEqual(f.read(node2), fulltext2)
559 540
560 541 with self.assertRaises(error.LookupError):
561 542 f.read(b'\x01' * 20)
562 543
563 544 self.assertFalse(f.renamed(node0))
564 545 self.assertFalse(f.renamed(node1))
565 546 self.assertFalse(f.renamed(node2))
566 547
567 548 with self.assertRaises(error.LookupError):
568 549 f.renamed(b'\x01' * 20)
569 550
570 551 self.assertFalse(f.cmp(node0, fulltext0))
571 552 self.assertFalse(f.cmp(node1, fulltext1))
572 553 self.assertFalse(f.cmp(node2, fulltext2))
573 554
574 555 self.assertTrue(f.cmp(node1, fulltext0))
575 556 self.assertTrue(f.cmp(node2, fulltext1))
576 557
577 558 with self.assertRaises(error.LookupError):
578 559 f.cmp(b'\x01' * 20, b'irrelevant')
579 560
580 self.assertEqual(f.revdiff(0, 1),
581 b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' +
582 fulltext1)
583
584 self.assertEqual(f.revdiff(0, 2),
585 b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x02' +
586 fulltext2)
587
588 561 # Nodes should be emitted in order.
589 562 gen = f.emitrevisions([node0, node1, node2], revisiondata=True)
590 563
591 564 rev = next(gen)
592 565
593 566 self.assertEqual(rev.node, node0)
594 567 self.assertEqual(rev.p1node, nullid)
595 568 self.assertEqual(rev.p2node, nullid)
596 569 self.assertIsNone(rev.linknode)
597 570 self.assertEqual(rev.basenode, nullid)
598 571 self.assertIsNone(rev.baserevisionsize)
599 572 self.assertEqual(rev.revision, fulltext0)
600 573 self.assertIsNone(rev.delta)
601 574
602 575 rev = next(gen)
603 576
604 577 self.assertEqual(rev.node, node1)
605 578 self.assertEqual(rev.p1node, node0)
606 579 self.assertEqual(rev.p2node, nullid)
607 580 self.assertIsNone(rev.linknode)
608 581 self.assertEqual(rev.basenode, node0)
609 582 self.assertIsNone(rev.baserevisionsize)
610 583 self.assertIsNone(rev.revision)
611 584 self.assertEqual(rev.delta,
612 585 b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' +
613 586 fulltext1)
614 587
615 588 rev = next(gen)
616 589
617 590 self.assertEqual(rev.node, node2)
618 591 self.assertEqual(rev.p1node, node1)
619 592 self.assertEqual(rev.p2node, nullid)
620 593 self.assertIsNone(rev.linknode)
621 594 self.assertEqual(rev.basenode, node1)
622 595 self.assertIsNone(rev.baserevisionsize)
623 596 self.assertIsNone(rev.revision)
624 597 self.assertEqual(rev.delta,
625 598 b'\x00\x00\x00\x00\x00\x00\x04\x01\x00\x00\x04\x02' +
626 599 fulltext2)
627 600
628 601 with self.assertRaises(StopIteration):
629 602 next(gen)
630 603
631 604 # Request not in DAG order is reordered to be in DAG order.
632 605 gen = f.emitrevisions([node2, node1, node0], revisiondata=True)
633 606
634 607 rev = next(gen)
635 608
636 609 self.assertEqual(rev.node, node0)
637 610 self.assertEqual(rev.p1node, nullid)
638 611 self.assertEqual(rev.p2node, nullid)
639 612 self.assertIsNone(rev.linknode)
640 613 self.assertEqual(rev.basenode, nullid)
641 614 self.assertIsNone(rev.baserevisionsize)
642 615 self.assertEqual(rev.revision, fulltext0)
643 616 self.assertIsNone(rev.delta)
644 617
645 618 rev = next(gen)
646 619
647 620 self.assertEqual(rev.node, node1)
648 621 self.assertEqual(rev.p1node, node0)
649 622 self.assertEqual(rev.p2node, nullid)
650 623 self.assertIsNone(rev.linknode)
651 624 self.assertEqual(rev.basenode, node0)
652 625 self.assertIsNone(rev.baserevisionsize)
653 626 self.assertIsNone(rev.revision)
654 627 self.assertEqual(rev.delta,
655 628 b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' +
656 629 fulltext1)
657 630
658 631 rev = next(gen)
659 632
660 633 self.assertEqual(rev.node, node2)
661 634 self.assertEqual(rev.p1node, node1)
662 635 self.assertEqual(rev.p2node, nullid)
663 636 self.assertIsNone(rev.linknode)
664 637 self.assertEqual(rev.basenode, node1)
665 638 self.assertIsNone(rev.baserevisionsize)
666 639 self.assertIsNone(rev.revision)
667 640 self.assertEqual(rev.delta,
668 641 b'\x00\x00\x00\x00\x00\x00\x04\x01\x00\x00\x04\x02' +
669 642 fulltext2)
670 643
671 644 with self.assertRaises(StopIteration):
672 645 next(gen)
673 646
674 647 # Unrecognized nodesorder value raises ProgrammingError.
675 648 with self.assertRaises(error.ProgrammingError):
676 649 list(f.emitrevisions([], nodesorder='bad'))
677 650
678 651 # nodesorder=storage is recognized. But we can't test it thoroughly
679 652 # because behavior is storage-dependent.
680 653 res = list(f.emitrevisions([node2, node1, node0],
681 654 nodesorder='storage'))
682 655 self.assertEqual(len(res), 3)
683 656 self.assertEqual({o.node for o in res}, {node0, node1, node2})
684 657
685 658 # nodesorder=nodes forces the order.
686 659 gen = f.emitrevisions([node2, node0], nodesorder='nodes',
687 660 revisiondata=True)
688 661
689 662 rev = next(gen)
690 663 self.assertEqual(rev.node, node2)
691 664 self.assertEqual(rev.p1node, node1)
692 665 self.assertEqual(rev.p2node, nullid)
693 666 self.assertEqual(rev.basenode, nullid)
694 667 self.assertIsNone(rev.baserevisionsize)
695 668 self.assertEqual(rev.revision, fulltext2)
696 669 self.assertIsNone(rev.delta)
697 670
698 671 rev = next(gen)
699 672 self.assertEqual(rev.node, node0)
700 673 self.assertEqual(rev.p1node, nullid)
701 674 self.assertEqual(rev.p2node, nullid)
702 675 # Delta behavior is storage dependent, so we can't easily test it.
703 676
704 677 with self.assertRaises(StopIteration):
705 678 next(gen)
706 679
707 680 # assumehaveparentrevisions=False (the default) won't send a delta for
708 681 # the first revision.
709 682 gen = f.emitrevisions({node2, node1}, revisiondata=True)
710 683
711 684 rev = next(gen)
712 685 self.assertEqual(rev.node, node1)
713 686 self.assertEqual(rev.p1node, node0)
714 687 self.assertEqual(rev.p2node, nullid)
715 688 self.assertEqual(rev.basenode, nullid)
716 689 self.assertIsNone(rev.baserevisionsize)
717 690 self.assertEqual(rev.revision, fulltext1)
718 691 self.assertIsNone(rev.delta)
719 692
720 693 rev = next(gen)
721 694 self.assertEqual(rev.node, node2)
722 695 self.assertEqual(rev.p1node, node1)
723 696 self.assertEqual(rev.p2node, nullid)
724 697 self.assertEqual(rev.basenode, node1)
725 698 self.assertIsNone(rev.baserevisionsize)
726 699 self.assertIsNone(rev.revision)
727 700 self.assertEqual(rev.delta,
728 701 b'\x00\x00\x00\x00\x00\x00\x04\x01\x00\x00\x04\x02' +
729 702 fulltext2)
730 703
731 704 with self.assertRaises(StopIteration):
732 705 next(gen)
733 706
734 707 # assumehaveparentrevisions=True allows delta against initial revision.
735 708 gen = f.emitrevisions([node2, node1],
736 709 revisiondata=True, assumehaveparentrevisions=True)
737 710
738 711 rev = next(gen)
739 712 self.assertEqual(rev.node, node1)
740 713 self.assertEqual(rev.p1node, node0)
741 714 self.assertEqual(rev.p2node, nullid)
742 715 self.assertEqual(rev.basenode, node0)
743 716 self.assertIsNone(rev.baserevisionsize)
744 717 self.assertIsNone(rev.revision)
745 718 self.assertEqual(rev.delta,
746 719 b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' +
747 720 fulltext1)
748 721
749 722 # deltaprevious=True forces a delta against the previous revision.
750 723 # Special case for initial revision.
751 724 gen = f.emitrevisions([node0], revisiondata=True, deltaprevious=True)
752 725
753 726 rev = next(gen)
754 727 self.assertEqual(rev.node, node0)
755 728 self.assertEqual(rev.p1node, nullid)
756 729 self.assertEqual(rev.p2node, nullid)
757 730 self.assertEqual(rev.basenode, nullid)
758 731 self.assertIsNone(rev.baserevisionsize)
759 732 self.assertIsNone(rev.revision)
760 733 self.assertEqual(rev.delta,
761 734 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00' +
762 735 fulltext0)
763 736
764 737 with self.assertRaises(StopIteration):
765 738 next(gen)
766 739
767 740 gen = f.emitrevisions([node0, node2], revisiondata=True,
768 741 deltaprevious=True)
769 742
770 743 rev = next(gen)
771 744 self.assertEqual(rev.node, node0)
772 745 self.assertEqual(rev.p1node, nullid)
773 746 self.assertEqual(rev.p2node, nullid)
774 747 self.assertEqual(rev.basenode, nullid)
775 748 self.assertIsNone(rev.baserevisionsize)
776 749 self.assertIsNone(rev.revision)
777 750 self.assertEqual(rev.delta,
778 751 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00' +
779 752 fulltext0)
780 753
781 754 rev = next(gen)
782 755 self.assertEqual(rev.node, node2)
783 756 self.assertEqual(rev.p1node, node1)
784 757 self.assertEqual(rev.p2node, nullid)
785 758 self.assertEqual(rev.basenode, node0)
786 759
787 760 with self.assertRaises(StopIteration):
788 761 next(gen)
789 762
790 763 def testrenamed(self):
791 764 fulltext0 = b'foo'
792 765 fulltext1 = b'bar'
793 766 fulltext2 = b'baz'
794 767
795 768 meta1 = {
796 769 b'copy': b'source0',
797 770 b'copyrev': b'a' * 40,
798 771 }
799 772
800 773 meta2 = {
801 774 b'copy': b'source1',
802 775 b'copyrev': b'b' * 40,
803 776 }
804 777
805 778 stored1 = b''.join([
806 779 b'\x01\ncopy: source0\n',
807 780 b'copyrev: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n\x01\n',
808 781 fulltext1,
809 782 ])
810 783
811 784 stored2 = b''.join([
812 785 b'\x01\ncopy: source1\n',
813 786 b'copyrev: bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n\x01\n',
814 787 fulltext2,
815 788 ])
816 789
817 790 f = self._makefilefn()
818 791 with self._maketransactionfn() as tr:
819 792 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
820 793 node1 = f.add(fulltext1, meta1, tr, 1, node0, nullid)
821 794 node2 = f.add(fulltext2, meta2, tr, 2, nullid, nullid)
822 795
823 796 # Metadata header isn't recognized when parent isn't nullid.
824 797 self.assertEqual(f.size(1), len(stored1))
825 798 self.assertEqual(f.size(2), len(fulltext2))
826 799
827 800 self.assertEqual(f.revision(node1), stored1)
828 801 self.assertEqual(f.revision(node1, raw=True), stored1)
829 802 self.assertEqual(f.revision(node2), stored2)
830 803 self.assertEqual(f.revision(node2, raw=True), stored2)
831 804
832 805 self.assertEqual(f.read(node1), fulltext1)
833 806 self.assertEqual(f.read(node2), fulltext2)
834 807
835 808 # Returns False when first parent is set.
836 809 self.assertFalse(f.renamed(node1))
837 810 self.assertEqual(f.renamed(node2), (b'source1', b'\xbb' * 20))
838 811
839 812 self.assertTrue(f.cmp(node1, fulltext1))
840 813 self.assertTrue(f.cmp(node1, stored1))
841 814 self.assertFalse(f.cmp(node2, fulltext2))
842 815 self.assertTrue(f.cmp(node2, stored2))
843 816
844 817 def testmetadataprefix(self):
845 818 # Content with metadata prefix has extra prefix inserted in storage.
846 819 fulltext0 = b'\x01\nfoo'
847 820 stored0 = b'\x01\n\x01\n\x01\nfoo'
848 821
849 822 fulltext1 = b'\x01\nbar'
850 823 meta1 = {
851 824 b'copy': b'source0',
852 825 b'copyrev': b'b' * 40,
853 826 }
854 827 stored1 = b''.join([
855 828 b'\x01\ncopy: source0\n',
856 829 b'copyrev: %s\n' % (b'b' * 40),
857 830 b'\x01\n\x01\nbar',
858 831 ])
859 832
860 833 f = self._makefilefn()
861 834 with self._maketransactionfn() as tr:
862 835 node0 = f.add(fulltext0, {}, tr, 0, nullid, nullid)
863 836 node1 = f.add(fulltext1, meta1, tr, 1, nullid, nullid)
864 837
865 838 # TODO this is buggy.
866 839 self.assertEqual(f.size(0), len(fulltext0) + 4)
867 840
868 841 self.assertEqual(f.size(1), len(fulltext1))
869 842
870 843 self.assertEqual(f.revision(node0), stored0)
871 844 self.assertEqual(f.revision(node0, raw=True), stored0)
872 845
873 846 self.assertEqual(f.revision(node1), stored1)
874 847 self.assertEqual(f.revision(node1, raw=True), stored1)
875 848
876 849 self.assertEqual(f.read(node0), fulltext0)
877 850 self.assertEqual(f.read(node1), fulltext1)
878 851
879 852 self.assertFalse(f.cmp(node0, fulltext0))
880 853 self.assertTrue(f.cmp(node0, stored0))
881 854
882 855 self.assertFalse(f.cmp(node1, fulltext1))
883 856 self.assertTrue(f.cmp(node1, stored0))
884 857
885 858 def testcensored(self):
886 859 f = self._makefilefn()
887 860
888 861 stored1 = storageutil.packmeta({
889 862 b'censored': b'tombstone',
890 863 }, b'')
891 864
892 865 # TODO tests are incomplete because we need the node to be
893 866 # different due to presence of censor metadata. But we can't
894 867 # do this with addrevision().
895 868 with self._maketransactionfn() as tr:
896 869 node0 = f.add(b'foo', None, tr, 0, nullid, nullid)
897 870 f.addrevision(stored1, tr, 1, node0, nullid,
898 871 flags=revlog.REVIDX_ISCENSORED)
899 872
900 873 self.assertTrue(f.iscensored(1))
901 874
902 875 self.assertEqual(f.revision(1), stored1)
903 876 self.assertEqual(f.revision(1, raw=True), stored1)
904 877
905 878 self.assertEqual(f.read(1), b'')
906 879
907 880 class ifilemutationtests(basetestcase):
908 881 """Generic tests for the ifilemutation interface.
909 882
910 883 All file storage backends that support writing should conform to the
911 884 tests in this class.
912 885
913 886 Use ``makeifilemutationtests()`` to create an instance of this type.
914 887 """
915 888 def testaddnoop(self):
916 889 f = self._makefilefn()
917 890 with self._maketransactionfn() as tr:
918 891 node0 = f.add(b'foo', None, tr, 0, nullid, nullid)
919 892 node1 = f.add(b'foo', None, tr, 0, nullid, nullid)
920 893 # Varying by linkrev shouldn't impact hash.
921 894 node2 = f.add(b'foo', None, tr, 1, nullid, nullid)
922 895
923 896 self.assertEqual(node1, node0)
924 897 self.assertEqual(node2, node0)
925 898 self.assertEqual(len(f), 1)
926 899
927 900 def testaddrevisionbadnode(self):
928 901 f = self._makefilefn()
929 902 with self._maketransactionfn() as tr:
930 903 # Adding a revision with bad node value fails.
931 904 with self.assertRaises(error.StorageError):
932 905 f.addrevision(b'foo', tr, 0, nullid, nullid, node=b'\x01' * 20)
933 906
934 907 def testaddrevisionunknownflag(self):
935 908 f = self._makefilefn()
936 909 with self._maketransactionfn() as tr:
937 910 for i in range(15, 0, -1):
938 911 if (1 << i) & ~revlog.REVIDX_KNOWN_FLAGS:
939 912 flags = 1 << i
940 913 break
941 914
942 915 with self.assertRaises(error.StorageError):
943 916 f.addrevision(b'foo', tr, 0, nullid, nullid, flags=flags)
944 917
945 918 def testaddgroupsimple(self):
946 919 f = self._makefilefn()
947 920
948 921 callbackargs = []
949 922 def cb(*args, **kwargs):
950 923 callbackargs.append((args, kwargs))
951 924
952 925 def linkmapper(node):
953 926 return 0
954 927
955 928 with self._maketransactionfn() as tr:
956 929 nodes = f.addgroup([], None, tr, addrevisioncb=cb)
957 930
958 931 self.assertEqual(nodes, [])
959 932 self.assertEqual(callbackargs, [])
960 933 self.assertEqual(len(f), 0)
961 934
962 935 fulltext0 = b'foo'
963 936 delta0 = mdiff.trivialdiffheader(len(fulltext0)) + fulltext0
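# mdiff.trivialdiffheader(n) is the 12-byte (start=0, end=0, length=n)
# delta header, so prepending it to the fulltext makes a delta that
# replaces an empty base with the complete text (the raw delta bytes in
# the ifiledata tests above show the same layout).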
964 937
965 938 deltas = [
966 939 (b'\x01' * 20, nullid, nullid, nullid, nullid, delta0, 0),
967 940 ]
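# Hedged reading of the 7-tuple layout, inferred from the values used in
# these tests: (node, p1, p2, linknode, deltabase, delta, flags). The
# bogus node here is what triggers the StorageError below.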
968 941
969 942 with self._maketransactionfn() as tr:
970 943 with self.assertRaises(error.StorageError):
971 944 f.addgroup(deltas, linkmapper, tr, addrevisioncb=cb)
972 945
973 946 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
974 947
975 948 f = self._makefilefn()
976 949
977 950 deltas = [
978 951 (node0, nullid, nullid, nullid, nullid, delta0, 0),
979 952 ]
980 953
981 954 with self._maketransactionfn() as tr:
982 955 nodes = f.addgroup(deltas, linkmapper, tr, addrevisioncb=cb)
983 956
984 957 self.assertEqual(nodes, [
985 958 b'\x49\xd8\xcb\xb1\x5c\xe2\x57\x92\x04\x47'
986 959 b'\x00\x6b\x46\x97\x8b\x7a\xf9\x80\xa9\x79'])
987 960
988 961 self.assertEqual(len(callbackargs), 1)
989 962 self.assertEqual(callbackargs[0][0][1], nodes[0])
990 963
991 964 self.assertEqual(list(f.revs()), [0])
992 965 self.assertEqual(f.rev(nodes[0]), 0)
993 966 self.assertEqual(f.node(0), nodes[0])
994 967
995 968 def testaddgroupmultiple(self):
996 969 f = self._makefilefn()
997 970
998 971 fulltexts = [
999 972 b'foo',
1000 973 b'bar',
1001 974 b'x' * 1024,
1002 975 ]
1003 976
1004 977 nodes = []
1005 978 with self._maketransactionfn() as tr:
1006 979 for fulltext in fulltexts:
1007 980 nodes.append(f.add(fulltext, None, tr, 0, nullid, nullid))
1008 981
1009 982 f = self._makefilefn()
1010 983 deltas = []
1011 984 for i, fulltext in enumerate(fulltexts):
1012 985 delta = mdiff.trivialdiffheader(len(fulltext)) + fulltext
1013 986
1014 987 deltas.append((nodes[i], nullid, nullid, nullid, nullid, delta, 0))
1015 988
1016 989 with self._maketransactionfn() as tr:
1017 990 self.assertEqual(f.addgroup(deltas, lambda x: 0, tr), nodes)
1018 991
1019 992 self.assertEqual(len(f), len(deltas))
1020 993 self.assertEqual(list(f.revs()), [0, 1, 2])
1021 994 self.assertEqual(f.rev(nodes[0]), 0)
1022 995 self.assertEqual(f.rev(nodes[1]), 1)
1023 996 self.assertEqual(f.rev(nodes[2]), 2)
1024 997 self.assertEqual(f.node(0), nodes[0])
1025 998 self.assertEqual(f.node(1), nodes[1])
1026 999 self.assertEqual(f.node(2), nodes[2])
1027 1000
1028 1001 def makeifileindextests(makefilefn, maketransactionfn):
1029 1002 """Create a unittest.TestCase class suitable for testing file storage.
1030 1003
1031 1004 ``makefilefn`` is a callable which receives the test case as an
1032 1005 argument and returns an object implementing the ``ifilestorage`` interface.
1033 1006
1034 1007 ``maketransactionfn`` is a callable which receives the test case as an
1035 1008 argument and returns a transaction object.
1036 1009
1037 1010 Returns a type that is a ``unittest.TestCase`` that can be used for
1038 1011 testing the object implementing the file storage interface. Simply
1039 1012 assign the returned value to a module-level attribute and a test loader
1040 1013 should find and run it automatically.
1041 1014 """
1042 1015 d = {
1043 1016 r'_makefilefn': makefilefn,
1044 1017 r'_maketransactionfn': maketransactionfn,
1045 1018 }
1046 1019 return type(r'ifileindextests', (ifileindextests,), d)
1047 1020
1048 1021 def makeifiledatatests(makefilefn, maketransactionfn):
1049 1022 d = {
1050 1023 r'_makefilefn': makefilefn,
1051 1024 r'_maketransactionfn': maketransactionfn,
1052 1025 }
1053 1026 return type(r'ifiledatatests', (ifiledatatests,), d)
1054 1027
1055 1028 def makeifilemutationtests(makefilefn, maketransactionfn):
1056 1029 d = {
1057 1030 r'_makefilefn': makefilefn,
1058 1031 r'_maketransactionfn': maketransactionfn,
1059 1032 }
1060 1033 return type(r'ifilemutationtests', (ifilemutationtests,), d)
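# Hedged usage sketch: a storage backend's test module wires its concrete
# factories into these builders and assigns the results at module scope so
# the test loader picks them up. The factory names here are illustrative.
#
#   from mercurial.testing import storage as storagetesting
#
#   filelogindextests = storagetesting.makeifileindextests(
#       makefilefn, maketransactionfn)
#   filelogdatatests = storagetesting.makeifiledatatests(
#       makefilefn, maketransactionfn)
#   filelogmutationtests = storagetesting.makeifilemutationtests(
#       makefilefn, maketransactionfn)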
@@ -1,674 +1,664
1 1 # simplestorerepo.py - Extension that swaps in alternate repository storage.
2 2 #
3 3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 # To use this with the test suite:
9 9 #
10 10 # $ HGREPOFEATURES="simplestore" ./run-tests.py \
11 11 # --extra-config-opt extensions.simplestore=`pwd`/simplestorerepo.py
12 12
13 13 from __future__ import absolute_import
14 14
15 15 import stat
16 16
17 17 from mercurial.i18n import _
18 18 from mercurial.node import (
19 19 bin,
20 20 hex,
21 21 nullid,
22 22 nullrev,
23 23 )
24 24 from mercurial.thirdparty import (
25 25 attr,
26 26 cbor,
27 27 )
28 28 from mercurial import (
29 29 ancestor,
30 30 bundlerepo,
31 31 error,
32 32 extensions,
33 33 localrepo,
34 34 mdiff,
35 35 pycompat,
36 36 repository,
37 37 revlog,
38 38 store,
39 39 verify,
40 40 )
41 41 from mercurial.utils import (
42 42 interfaceutil,
43 43 storageutil,
44 44 )
45 45
46 46 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
47 47 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
48 48 # be specifying the version(s) of Mercurial they are tested with, or
49 49 # leave the attribute unspecified.
50 50 testedwith = 'ships-with-hg-core'
51 51
52 52 REQUIREMENT = 'testonly-simplestore'
53 53
54 54 def validatenode(node):
55 55 if isinstance(node, int):
56 56 raise ValueError('expected node; got int')
57 57
58 58 if len(node) != 20:
59 59 raise ValueError('expected 20 byte node')
60 60
61 61 def validaterev(rev):
62 62 if not isinstance(rev, int):
63 63 raise ValueError('expected int')
64 64
65 65 class simplestoreerror(error.StorageError):
66 66 pass
67 67
68 68 @interfaceutil.implementer(repository.irevisiondelta)
69 69 @attr.s(slots=True, frozen=True)
70 70 class simplestorerevisiondelta(object):
71 71 node = attr.ib()
72 72 p1node = attr.ib()
73 73 p2node = attr.ib()
74 74 basenode = attr.ib()
75 75 linknode = attr.ib()
76 76 flags = attr.ib()
77 77 baserevisionsize = attr.ib()
78 78 revision = attr.ib()
79 79 delta = attr.ib()
80 80
81 81 @interfaceutil.implementer(repository.ifilestorage)
82 82 class filestorage(object):
83 83 """Implements storage for a tracked path.
84 84
85 85 Data is stored in the VFS in a directory corresponding to the tracked
86 86 path.
87 87
88 88 Index data is stored in an ``index`` file using CBOR.
89 89
90 90 Fulltext data is stored in files having names of the node.
91 91 """
92 92
93 93 def __init__(self, svfs, path):
94 94 self._svfs = svfs
95 95 self._path = path
96 96
97 97 self._storepath = b'/'.join([b'data', path])
98 98 self._indexpath = b'/'.join([self._storepath, b'index'])
99 99
100 100 indexdata = self._svfs.tryread(self._indexpath)
101 101 if indexdata:
102 102 indexdata = cbor.loads(indexdata)
103 103
104 104 self._indexdata = indexdata or []
105 105 self._indexbynode = {}
106 106 self._indexbyrev = {}
107 107 self._index = []
108 108 self._refreshindex()
109 109
110 110 def _refreshindex(self):
111 111 self._indexbynode.clear()
112 112 self._indexbyrev.clear()
113 113 self._index = []
114 114
115 115 for i, entry in enumerate(self._indexdata):
116 116 self._indexbynode[entry[b'node']] = entry
117 117 self._indexbyrev[i] = entry
118 118
119 119 self._indexbynode[nullid] = {
120 120 b'node': nullid,
121 121 b'p1': nullid,
122 122 b'p2': nullid,
123 123 b'linkrev': nullrev,
124 124 b'flags': 0,
125 125 }
126 126
127 127 self._indexbyrev[nullrev] = {
128 128 b'node': nullid,
129 129 b'p1': nullid,
130 130 b'p2': nullid,
131 131 b'linkrev': nullrev,
132 132 b'flags': 0,
133 133 }
134 134
135 135 for i, entry in enumerate(self._indexdata):
136 136 p1rev, p2rev = self.parentrevs(self.rev(entry[b'node']))
137 137
138 138 # start, length, rawsize, chainbase, linkrev, p1, p2, node
139 139 self._index.append((0, 0, 0, -1, entry[b'linkrev'], p1rev, p2rev,
140 140 entry[b'node']))
141 141
142 142 self._index.append((0, 0, 0, -1, -1, -1, -1, nullid))
143 143
144 144 def __len__(self):
145 145 return len(self._indexdata)
146 146
147 147 def __iter__(self):
148 148 return iter(range(len(self)))
149 149
150 150 def revs(self, start=0, stop=None):
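# ``stop`` is inclusive here, and start > stop iterates in reverse
# (e.g. revs(2, 0) emits 2, 1, 0), matching the generic index tests.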
151 151 step = 1
152 152 if stop is not None:
153 153 if start > stop:
154 154 step = -1
155 155
156 156 stop += step
157 157 else:
158 158 stop = len(self)
159 159
160 160 return range(start, stop, step)
161 161
162 162 def parents(self, node):
163 163 validatenode(node)
164 164
165 165 if node not in self._indexbynode:
166 166 raise KeyError('unknown node')
167 167
168 168 entry = self._indexbynode[node]
169 169
170 170 return entry[b'p1'], entry[b'p2']
171 171
172 172 def parentrevs(self, rev):
173 173 p1, p2 = self.parents(self._indexbyrev[rev][b'node'])
174 174 return self.rev(p1), self.rev(p2)
175 175
176 176 def rev(self, node):
177 177 validatenode(node)
178 178
179 179 try:
180 180 self._indexbynode[node]
181 181 except KeyError:
182 182 raise error.LookupError(node, self._indexpath, _('no node'))
183 183
184 184 for rev, entry in self._indexbyrev.items():
185 185 if entry[b'node'] == node:
186 186 return rev
187 187
188 188 raise error.ProgrammingError('this should not occur')
189 189
190 190 def node(self, rev):
191 191 validaterev(rev)
192 192
193 193 return self._indexbyrev[rev][b'node']
194 194
195 195 def lookup(self, node):
196 196 if isinstance(node, int):
197 197 return self.node(node)
198 198
199 199 if len(node) == 20:
200 200 self.rev(node)
201 201 return node
202 202
203 203 try:
204 204 rev = int(node)
205 205 if '%d' % rev != node:
206 206 raise ValueError
207 207
208 208 if rev < 0:
209 209 rev = len(self) + rev
210 210 if rev < 0 or rev >= len(self):
211 211 raise ValueError
212 212
213 213 return self.node(rev)
214 214 except (ValueError, OverflowError):
215 215 pass
216 216
217 217 if len(node) == 40:
218 218 try:
219 219 rawnode = bin(node)
220 220 self.rev(rawnode)
221 221 return rawnode
222 222 except TypeError:
223 223 pass
224 224
225 225 raise error.LookupError(node, self._path, _('invalid lookup input'))
226 226
227 227 def linkrev(self, rev):
228 228 validaterev(rev)
229 229
230 230 return self._indexbyrev[rev][b'linkrev']
231 231
232 232 def _flags(self, rev):
233 233 validaterev(rev)
234 234
235 235 return self._indexbyrev[rev][b'flags']
236 236
237 237 def _candelta(self, baserev, rev):
238 238 validaterev(baserev)
239 239 validaterev(rev)
240 240
241 241 if ((self._flags(baserev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)
242 242 or (self._flags(rev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)):
243 243 return False
244 244
245 245 return True
246 246
247 247 def _processflags(self, text, flags, operation, raw=False):
248 248 if flags == 0:
249 249 return text, True
250 250
251 251 if flags & ~revlog.REVIDX_KNOWN_FLAGS:
252 252 raise simplestoreerror(_("incompatible revision flag '%#x'") %
253 253 (flags & ~revlog.REVIDX_KNOWN_FLAGS))
254 254
255 255 validatehash = True
256 256 # Depending on the operation (read or write), the order might be
257 257 # reversed due to non-commutative transforms.
258 258 orderedflags = revlog.REVIDX_FLAGS_ORDER
259 259 if operation == 'write':
260 260 orderedflags = reversed(orderedflags)
261 261
262 262 for flag in orderedflags:
263 263 # If a flagprocessor has been registered for a known flag, apply the
264 264 # related operation transform and update result tuple.
265 265 if flag & flags:
266 266 vhash = True
267 267
268 268 if flag not in revlog._flagprocessors:
269 269 message = _("missing processor for flag '%#x'") % (flag)
270 270 raise simplestoreerror(message)
271 271
272 272 processor = revlog._flagprocessors[flag]
273 273 if processor is not None:
274 274 readtransform, writetransform, rawtransform = processor
275 275
276 276 if raw:
277 277 vhash = rawtransform(self, text)
278 278 elif operation == 'read':
279 279 text, vhash = readtransform(self, text)
280 280 else: # write operation
281 281 text, vhash = writetransform(self, text)
282 282 validatehash = validatehash and vhash
283 283
284 284 return text, validatehash
285 285
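# Editor's note: a sketch of why _processflags() reverses the flag order
# for writes. With two hypothetical non-commutative processors A and B
# registered in that order, a round-trip must unwind transforms
# last-applied-first:
#
#   stored = A.write(B.write(text))   # 'write' walks the order reversed
#   text   = B.read(A.read(stored))   # 'read' walks REVIDX_FLAGS_ORDER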
286 286 def checkhash(self, text, node, p1=None, p2=None, rev=None):
287 287 if p1 is None and p2 is None:
288 288 p1, p2 = self.parents(node)
289 289 if node != storageutil.hashrevisionsha1(text, p1, p2):
290 290 raise simplestoreerror(_("integrity check failed on %s") %
291 291 self._path)
292 292
293 293 def revision(self, node, raw=False):
294 294 validatenode(node)
295 295
296 296 if node == nullid:
297 297 return b''
298 298
299 299 rev = self.rev(node)
300 300 flags = self._flags(rev)
301 301
302 302 path = b'/'.join([self._storepath, hex(node)])
303 303 rawtext = self._svfs.read(path)
304 304
305 305 text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
306 306 if validatehash:
307 307 self.checkhash(text, node, rev=rev)
308 308
309 309 return text
310 310
311 311 def read(self, node):
312 312 validatenode(node)
313 313
314 314 revision = self.revision(node)
315 315
316 316 if not revision.startswith(b'\1\n'):
317 317 return revision
318 318
319 319 start = revision.index(b'\1\n', 2)
320 320 return revision[start + 2:]
321 321
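# Editor's note: a sketch of the copy-metadata envelope that read()
# strips (hypothetical values). A revision carrying metadata -- or whose
# data merely begins with \1\n -- is framed as:
#
#   \1\ncopy: <source path>\ncopyrev: <40-hex node>\n\1\n<file data>
#
# revision() returns the whole envelope; read() returns only <file data>.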
322 322 def renamed(self, node):
323 323 validatenode(node)
324 324
325 325 if self.parents(node)[0] != nullid:
326 326 return False
327 327
328 328 fulltext = self.revision(node)
329 329 m = storageutil.parsemeta(fulltext)[0]
330 330
331 331 if m and b'copy' in m:
332 332 return m[b'copy'], bin(m[b'copyrev'])
333 333
334 334 return False
335 335
336 336 def cmp(self, node, text):
337 337 validatenode(node)
338 338
339 339 t = text
340 340
341 341 if text.startswith(b'\1\n'):
342 342 t = b'\1\n\1\n' + text
343 343
344 344 p1, p2 = self.parents(node)
345 345
346 346 if storageutil.hashrevisionsha1(t, p1, p2) == node:
347 347 return False
348 348
349 349 if self.iscensored(self.rev(node)):
350 350 return text != b''
351 351
352 352 if self.renamed(node):
353 353 t2 = self.read(node)
354 354 return t2 != text
355 355
356 356 return True
357 357
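# Editor's note: cmp() answers "is the stored revision DIFFERENT from
# text?", the inverse of a conventional comparison. The b'\1\n\1\n'
# prefix above reproduces how a text starting with \1\n would have been
# stored behind an empty metadata envelope, so the hash fast path still
# matches. Hypothetical usage:
#
#   if not fl.cmp(node, text):
#       pass  # contents identical; nothing new to store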
358 358 def size(self, rev):
359 359 validaterev(rev)
360 360
361 361 node = self._indexbyrev[rev][b'node']
362 362
363 363 if self.renamed(node):
364 364 return len(self.read(node))
365 365
366 366 if self.iscensored(rev):
367 367 return 0
368 368
369 369 return len(self.revision(node))
370 370
371 371 def iscensored(self, rev):
372 372 validaterev(rev)
373 373
374 374 return self._flags(rev) & revlog.REVIDX_ISCENSORED
375 375
376 376 def commonancestorsheads(self, a, b):
377 377 validatenode(a)
378 378 validatenode(b)
379 379
380 380 a = self.rev(a)
381 381 b = self.rev(b)
382 382
383 383 ancestors = ancestor.commonancestorsheads(self.parentrevs, a, b)
384 384 return pycompat.maplist(self.node, ancestors)
385 385
386 386 def descendants(self, revs):
387 387 # This is a copy of revlog.descendants()
388 388 first = min(revs)
389 389 if first == nullrev:
390 390 for i in self:
391 391 yield i
392 392 return
393 393
394 394 seen = set(revs)
395 395 for i in self.revs(start=first + 1):
396 396 for x in self.parentrevs(i):
397 397 if x != nullrev and x in seen:
398 398 seen.add(i)
399 399 yield i
400 400 break
401 401
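# Editor's note: descendants() emits, in ascending rev order, every rev
# with an ancestor in `revs`. For a hypothetical linear history
# 0 <- 1 <- 2, descendants([0]) yields 1 then 2; a single forward sweep
# suffices because parents always have smaller rev numbers than children.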
402 402 # Required by verify.
403 403 def files(self):
404 404 entries = self._svfs.listdir(self._storepath)
405 405
406 406 # Strip out undo.backup.* files created as part of transaction
407 407 # recording.
408 408 entries = [f for f in entries if not f.startswith('undo.backup.')]
409 409
410 410 return [b'/'.join((self._storepath, f)) for f in entries]
411 411
412 412 def add(self, text, meta, transaction, linkrev, p1, p2):
413 413 if meta or text.startswith(b'\1\n'):
414 414 text = storageutil.packmeta(meta, text)
415 415
416 416 return self.addrevision(text, transaction, linkrev, p1, p2)
417 417
418 418 def addrevision(self, text, transaction, linkrev, p1, p2, node=None,
419 419 flags=revlog.REVIDX_DEFAULT_FLAGS, cachedelta=None):
420 420 validatenode(p1)
421 421 validatenode(p2)
422 422
423 423 if flags:
424 424 node = node or storageutil.hashrevisionsha1(text, p1, p2)
425 425
426 426 rawtext, validatehash = self._processflags(text, flags, 'write')
427 427
428 428 node = node or storageutil.hashrevisionsha1(text, p1, p2)
429 429
430 430 if node in self._indexbynode:
431 431 return node
432 432
433 433 if validatehash:
434 434 self.checkhash(rawtext, node, p1=p1, p2=p2)
435 435
436 436 return self._addrawrevision(node, rawtext, transaction, linkrev, p1, p2,
437 437 flags)
438 438
439 439 def _addrawrevision(self, node, rawtext, transaction, link, p1, p2, flags):
440 440 transaction.addbackup(self._indexpath)
441 441
442 442 path = b'/'.join([self._storepath, hex(node)])
443 443
444 444 self._svfs.write(path, rawtext)
445 445
446 446 self._indexdata.append({
447 447 b'node': node,
448 448 b'p1': p1,
449 449 b'p2': p2,
450 450 b'linkrev': link,
451 451 b'flags': flags,
452 452 })
453 453
454 454 self._reflectindexupdate()
455 455
456 456 return node
457 457
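# Editor's note: a sketch of the resulting store layout, using the names
# defined on this class (paths illustrative):
#
#   <self._storepath>/<hex(node)>  -- one fulltext file per revision
#   <self._indexpath>              -- CBOR-encoded list of index entries
#
# Storing complete rawtexts keyed by node means reads never walk a delta
# chain, at the cost of disk space.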
458 458 def _reflectindexupdate(self):
459 459 self._refreshindex()
460 460 self._svfs.write(self._indexpath, cbor.dumps(self._indexdata))
461 461
462 462 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
463 463 nodes = []
464 464
465 465 transaction.addbackup(self._indexpath)
466 466
467 467 for node, p1, p2, linknode, deltabase, delta, flags in deltas:
468 468 linkrev = linkmapper(linknode)
469 469 flags = flags or revlog.REVIDX_DEFAULT_FLAGS
470 470
471 471 nodes.append(node)
472 472
473 473 if node in self._indexbynode:
474 474 continue
475 475
476 476 # Need to resolve the fulltext from the delta base.
477 477 if deltabase == nullid:
478 478 text = mdiff.patch(b'', delta)
479 479 else:
480 480 text = mdiff.patch(self.revision(deltabase), delta)
481 481
482 482 self._addrawrevision(node, text, transaction, linkrev, p1, p2,
483 483 flags)
484 484
485 485 if addrevisioncb:
486 486 addrevisioncb(self, node)
487 487
488 488 return nodes
489 489
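# Editor's note: a sketch of the fulltext resolution above, with
# hypothetical inputs. Each incoming delta is patched against its base's
# fulltext before being stored whole:
#
#   base = self.revision(deltabase)   # e.g. b'line 1\nline 2\n'
#   text = mdiff.patch(base, delta)   # complete new revision text
#
# A deltabase of nullid means the delta is taken against the empty
# string, i.e. it effectively carries the entire fulltext.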
490 def revdiff(self, rev1, rev2):
491 validaterev(rev1)
492 validaterev(rev2)
493
494 node1 = self.node(rev1)
495 node2 = self.node(rev2)
496
497 return mdiff.textdiff(self.revision(node1, raw=True),
498 self.revision(node2, raw=True))
499
500 490 def heads(self, start=None, stop=None):
501 491 # This is copied from revlog.py.
502 492 if start is None and stop is None:
503 493 if not len(self):
504 494 return [nullid]
505 495 return [self.node(r) for r in self.headrevs()]
506 496
507 497 if start is None:
508 498 start = nullid
509 499 if stop is None:
510 500 stop = []
511 501 stoprevs = set([self.rev(n) for n in stop])
512 502 startrev = self.rev(start)
513 503 reachable = {startrev}
514 504 heads = {startrev}
515 505
516 506 parentrevs = self.parentrevs
517 507 for r in self.revs(start=startrev + 1):
518 508 for p in parentrevs(r):
519 509 if p in reachable:
520 510 if r not in stoprevs:
521 511 reachable.add(r)
522 512 heads.add(r)
523 513 if p in heads and p not in stoprevs:
524 514 heads.remove(p)
525 515
526 516 return [self.node(r) for r in heads]
527 517
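# Editor's note: a worked example of the bounded walk above, with a
# hypothetical history where revs 1 and 2 are both children of rev 0.
# For start=node(0): rev 0 starts as the lone head, is displaced as soon
# as child rev 1 is reached, and rev 2 is added when its parent proves
# reachable, leaving [node(1), node(2)] as the result.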
528 518 def children(self, node):
529 519 validatenode(node)
530 520
531 521 # This is a copy of revlog.children().
532 522 c = []
533 523 p = self.rev(node)
534 524 for r in self.revs(start=p + 1):
535 525 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
536 526 if prevs:
537 527 for pr in prevs:
538 528 if pr == p:
539 529 c.append(self.node(r))
540 530 elif p == nullrev:
541 531 c.append(self.node(r))
542 532 return c
543 533
544 534 def getstrippoint(self, minlink):
546 536 # This is largely a copy of revlog.getstrippoint().
547 537 brokenrevs = set()
548 538 strippoint = len(self)
549 539
550 540 heads = {}
551 541 futurelargelinkrevs = set()
552 542 for headrev in map(self.rev, self.heads()):
553 543 headlinkrev = self.linkrev(headrev)
554 544 heads[headrev] = headlinkrev
555 545 if headlinkrev >= minlink:
556 546 futurelargelinkrevs.add(headlinkrev)
557 547
558 548 # This algorithm involves walking down the rev graph, starting at the
559 549 # heads. Since the revs are topologically sorted according to linkrev,
560 550 # once all head linkrevs are below the minlink, we know there are
561 551 # no more revs that could have a linkrev greater than minlink.
562 552 # So we can stop walking.
563 553 while futurelargelinkrevs:
564 554 strippoint -= 1
565 555 linkrev = heads.pop(strippoint)
566 556
567 557 if linkrev < minlink:
568 558 brokenrevs.add(strippoint)
569 559 else:
570 560 futurelargelinkrevs.remove(linkrev)
571 561
572 562 for p in self.parentrevs(strippoint):
573 563 if p != nullrev:
574 564 plinkrev = self.linkrev(p)
575 565 heads[p] = plinkrev
576 566 if plinkrev >= minlink:
577 567 futurelargelinkrevs.add(plinkrev)
578 568
579 569 return strippoint, brokenrevs
580 570
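# Editor's note: a worked example with hypothetical values. Take heads
# at rev 1 (linkrev 4) and rev 2 (linkrev 3), both children of rev 0,
# and minlink=4. The walk pops rev 2 first: linkrev 3 < minlink, so
# rev 2 joins brokenrevs. Popping rev 1 (linkrev 4) empties
# futurelargelinkrevs and the walk stops, returning (1, {2}): strip from
# rev 1 onward, noting rev 2 is removed even though its own linkrev is
# below minlink.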
581 571 def strip(self, minlink, transaction):
582 572 if not len(self):
583 573 return
584 574
585 575 rev, _ignored = self.getstrippoint(minlink)
586 576 if rev == len(self):
587 577 return
588 578
589 579 # Purge index data starting at the requested revision.
590 580 self._indexdata[rev:] = []
591 581 self._reflectindexupdate()
592 582
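# Editor's note: strip() only truncates the index; the fulltext files
# already written under _storepath are left in place. This is presumably
# why the verifier tweak at the bottom of this file disables
# warnorphanstorefiles -- orphaned store files are expected here.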
593 583 def issimplestorefile(f, kind, st):
594 584 if kind != stat.S_IFREG:
595 585 return False
596 586
597 587 if store.isrevlog(f, kind, st):
598 588 return False
599 589
600 590 # Ignore transaction undo files.
601 591 if f.startswith('undo.'):
602 592 return False
603 593
604 594 # Otherwise assume it belongs to the simple store.
605 595 return True
606 596
607 597 class simplestore(store.encodedstore):
608 598 def datafiles(self):
609 599 for x in super(simplestore, self).datafiles():
610 600 yield x
611 601
612 602 # Supplement with non-revlog files.
613 603 extrafiles = self._walk('data', True, filefilter=issimplestorefile)
614 604
615 605 for unencoded, encoded, size in extrafiles:
616 606 try:
617 607 unencoded = store.decodefilename(unencoded)
618 608 except KeyError:
619 609 unencoded = None
620 610
621 611 yield unencoded, encoded, size
622 612
623 613 def reposetup(ui, repo):
624 614 if not repo.local():
625 615 return
626 616
627 617 if isinstance(repo, bundlerepo.bundlerepository):
628 618 raise error.Abort(_('cannot use simple store with bundlerepo'))
629 619
630 620 class simplestorerepo(repo.__class__):
631 621 def file(self, f):
632 622 return filestorage(self.svfs, f)
633 623
634 624 repo.__class__ = simplestorerepo
635 625
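# Editor's note: assigning to repo.__class__ is the usual Mercurial
# extension idiom for overriding repository methods in place: the
# derived class keeps all existing state on `repo` and redirects only
# file() to the simple store's filestorage.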
636 626 def featuresetup(ui, supported):
637 627 supported.add(REQUIREMENT)
638 628
639 629 def newreporequirements(orig, ui):
640 630 """Modifies default requirements for new repos to use the simple store."""
641 631 requirements = orig(ui)
642 632
643 633 # These requirements are only used to affect creation of the store
644 634 # object. We have our own store. So we can remove them.
645 635 # TODO do this once we feel like taking the test hit.
646 636 #if 'fncache' in requirements:
647 637 # requirements.remove('fncache')
648 638 #if 'dotencode' in requirements:
649 639 # requirements.remove('dotencode')
650 640
651 641 requirements.add(REQUIREMENT)
652 642
653 643 return requirements
654 644
655 645 def makestore(orig, requirements, path, vfstype):
656 646 if REQUIREMENT not in requirements:
657 647 return orig(requirements, path, vfstype)
658 648
659 649 return simplestore(path, vfstype)
660 650
661 651 def verifierinit(orig, self, *args, **kwargs):
662 652 orig(self, *args, **kwargs)
663 653
664 654 # We don't care that files in the store don't align with what is
665 655 # advertised. So suppress these warnings.
666 656 self.warnorphanstorefiles = False
667 657
668 658 def extsetup(ui):
669 659 localrepo.featuresetupfuncs.add(featuresetup)
670 660
671 661 extensions.wrapfunction(localrepo, 'newreporequirements',
672 662 newreporequirements)
673 663 extensions.wrapfunction(store, 'store', makestore)
674 664 extensions.wrapfunction(verify.verifier, '__init__', verifierinit)