##// END OF EJS Templates
filelog: drop index attribute (API)...
Gregory Szorc -
r39896:d9b3cc3d default
parent child Browse files
Show More
@@ -1,262 +1,260 b''
1 1 # filelog.py - file history class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from . import (
11 11 error,
12 12 repository,
13 13 revlog,
14 14 )
15 15 from .utils import (
16 16 interfaceutil,
17 17 )
18 18
@interfaceutil.implementer(repository.ifilestorage)
class filelog(object):
    """Revlog-backed storage for a single tracked file.

    Thin adapter that exposes the ``repository.ifilestorage`` interface
    on top of a ``revlog.revlog`` instance stored under ``data/``.
    """

    def __init__(self, opener, path):
        self._revlog = revlog.revlog(opener,
                                     '/'.join(('data', path + '.i')),
                                     censorable=True)
        # Full name of the user visible file, relative to the repository
        # root. Used by LFS.
        self._revlog.filename = path
        # Used by changegroup generation.
        self._generaldelta = self._revlog._generaldelta

    def __len__(self):
        return len(self._revlog)

    def __iter__(self):
        return self._revlog.__iter__()

    def revs(self, start=0, stop=None):
        return self._revlog.revs(start=start, stop=stop)

    def parents(self, node):
        return self._revlog.parents(node)

    def parentrevs(self, rev):
        return self._revlog.parentrevs(rev)

    def rev(self, node):
        return self._revlog.rev(node)

    def node(self, rev):
        return self._revlog.node(rev)

    def lookup(self, node):
        return self._revlog.lookup(node)

    def linkrev(self, rev):
        return self._revlog.linkrev(rev)

    # Used by verify.
    def flags(self, rev):
        return self._revlog.flags(rev)

    def commonancestorsheads(self, node1, node2):
        return self._revlog.commonancestorsheads(node1, node2)

    # Used by dagop.blockdescendants().
    def descendants(self, revs):
        return self._revlog.descendants(revs)

    def heads(self, start=None, stop=None):
        return self._revlog.heads(start, stop)

    # Used by hgweb, children extension.
    def children(self, node):
        return self._revlog.children(node)

    def deltaparent(self, rev):
        return self._revlog.deltaparent(rev)

    def iscensored(self, rev):
        return self._revlog.iscensored(rev)

    # Used by repo upgrade, verify.
    def rawsize(self, rev):
        return self._revlog.rawsize(rev)

    # Might be unused.
    def checkhash(self, text, node, p1=None, p2=None, rev=None):
        return self._revlog.checkhash(text, node, p1=p1, p2=p2, rev=rev)

    def revision(self, node, _df=None, raw=False):
        return self._revlog.revision(node, _df=_df, raw=raw)

    def revdiff(self, rev1, rev2):
        return self._revlog.revdiff(rev1, rev2)

    def emitrevisiondeltas(self, requests):
        return self._revlog.emitrevisiondeltas(requests)

    def addrevision(self, revisiondata, transaction, linkrev, p1, p2,
                    node=None, flags=revlog.REVIDX_DEFAULT_FLAGS,
                    cachedelta=None):
        return self._revlog.addrevision(revisiondata, transaction, linkrev,
                                        p1, p2, node=node, flags=flags,
                                        cachedelta=cachedelta)

    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
        return self._revlog.addgroup(deltas, linkmapper, transaction,
                                     addrevisioncb=addrevisioncb)

    def getstrippoint(self, minlink):
        return self._revlog.getstrippoint(minlink)

    def strip(self, minlink, transaction):
        return self._revlog.strip(minlink, transaction)

    def censorrevision(self, tr, node, tombstone=b''):
        # NOTE(review): ``tr`` is accepted but not forwarded to the revlog
        # layer — confirm the underlying revlog.censorrevision() signature.
        return self._revlog.censorrevision(node, tombstone=tombstone)

    def files(self):
        return self._revlog.files()

    def read(self, node):
        text = self.revision(node)
        if not text.startswith('\1\n'):
            return text
        # Strip the copy metadata header delimited by '\1\n' markers.
        end = text.index('\1\n', 2)
        return text[end + 2:]

    def add(self, text, meta, transaction, link, p1=None, p2=None):
        if meta or text.startswith('\1\n'):
            # Metadata present (or data collides with the header marker):
            # encode a metadata header in front of the file data.
            text = revlog.packmeta(meta, text)
        return self.addrevision(text, transaction, link, p1, p2)

    def renamed(self, node):
        # Copies are only recorded on revisions without a 1st parent.
        if self.parents(node)[0] != revlog.nullid:
            return False
        meta = revlog.parsemeta(self.revision(node))[0]
        # copy and copyrev occur in pairs. In rare cases due to bugs,
        # one can occur without the other.
        if meta and "copy" in meta and "copyrev" in meta:
            return (meta["copy"], revlog.bin(meta["copyrev"]))
        return False

    def size(self, rev):
        """return the size of a given revision"""

        # for revisions with renames, we have to go the slow way
        node = self.node(rev)
        if self.renamed(node):
            return len(self.read(node))
        if self.iscensored(rev):
            return 0

        # XXX if self.read(node).startswith("\1\n"), this returns (size+4)
        return self._revlog.size(rev)

    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different than what is stored.
        """

        candidate = text
        if text.startswith('\1\n'):
            # Prepend an empty metadata header so the hash comparison
            # matches how such data is stored.
            candidate = '\1\n\1\n' + text

        if not self._revlog.cmp(node, candidate):
            # Hashes match, so contents are identical.
            return False

        # censored files compare against the empty file
        if self.iscensored(self.rev(node)):
            return text != ''

        # renaming a file produces a different hash, even if the data
        # remains unchanged. Check if it's the case (slow):
        if self.renamed(node):
            return self.read(node) != text

        return True

    def verifyintegrity(self, state):
        return self._revlog.verifyintegrity(state)

    # TODO these aren't part of the interface and aren't internal methods.
    # Callers should be fixed to not use them.

    # Used by bundlefilelog, unionfilelog.
    @property
    def indexfile(self):
        return self._revlog.indexfile

    @indexfile.setter
    def indexfile(self, value):
        self._revlog.indexfile = value

    # Used by repo upgrade.
    @property
    def opener(self):
        return self._revlog.opener

    # Used by repo upgrade.
    def clone(self, tr, destrevlog, **kwargs):
        if not isinstance(destrevlog, filelog):
            raise error.ProgrammingError('expected filelog to clone()')

        return self._revlog.clone(tr, destrevlog._revlog, **kwargs)
class narrowfilelog(filelog):
    """Filelog variation to be used with narrow stores."""

    def __init__(self, opener, path, narrowmatch):
        super(narrowfilelog, self).__init__(opener, path)
        self._narrowmatch = narrowmatch

    def renamed(self, node):
        res = super(narrowfilelog, self).renamed(node)

        # Renames that come from outside the narrowspec are problematic
        # because we may lack the base text for the rename. This can result
        # in code attempting to walk the ancestry or compute a diff
        # encountering a missing revision. We address this by silently
        # removing rename metadata if the source file is outside the
        # narrow spec.
        #
        # A better solution would be to see if the base revision is available,
        # rather than assuming it isn't.
        #
        # An even better solution would be to teach all consumers of rename
        # metadata that the base revision may not be available.
        #
        # TODO consider better ways of doing this.
        if res and not self._narrowmatch(res[0]):
            return None

        return res

    def size(self, rev):
        # Because we have a custom renamed() that may lie, we need to call
        # the base renamed() to report accurate results.
        node = self.node(rev)
        if not super(narrowfilelog, self).renamed(node):
            return super(narrowfilelog, self).size(rev)
        return len(self.read(node))

    def cmp(self, node, text):
        different = super(narrowfilelog, self).cmp(node, text)

        # Because renamed() may lie, we may get false positives for
        # different content. Check for this by comparing against the original
        # renamed() implementation.
        if different and super(narrowfilelog, self).renamed(node):
            return self.read(node) != text

        return different
@@ -1,1642 +1,1639 b''
1 1 # repository.py - Interfaces and base classes for repositories and peers.
2 2 #
3 3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from .i18n import _
11 11 from . import (
12 12 error,
13 13 )
14 14 from .utils import (
15 15 interfaceutil,
16 16 )
17 17
18 18 # When narrowing is finalized and no longer subject to format changes,
19 19 # we should move this to just "narrow" or similar.
20 20 NARROW_REQUIREMENT = 'narrowhg-experimental'
21 21
22 22 # Local repository feature string.
23 23
24 24 # Revlogs are being used for file storage.
25 25 REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
26 26 # The storage part of the repository is shared from an external source.
27 27 REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
28 28 # LFS supported for backing file storage.
29 29 REPO_FEATURE_LFS = b'lfs'
30 30
class ipeerconnection(interfaceutil.Interface):
    """Represents a "connection" to a repository.

    This is the base interface for representing a connection to a repository.
    It holds basic properties and methods applicable to all peer types.

    This is not a complete interface definition and should not be used
    outside of this module.
    """
    ui = interfaceutil.Attribute("""ui.ui instance""")

    def url():
        """Returns a URL string representing this peer.

        Currently, implementations expose the raw URL used to construct the
        instance. It may contain credentials as part of the URL. The
        expectations of the value aren't well-defined and this could lead to
        data leakage.

        TODO audit/clean consumers and more clearly define the contents of this
        value.
        """

    def local():
        """Returns a local repository instance.

        If the peer represents a local repository, returns an object that
        can be used to interface with it. Otherwise returns ``None``.
        """

    def peer():
        """Returns an object conforming to this interface.

        Most implementations will ``return self``.
        """

    def canpush():
        """Returns a boolean indicating if this peer can be pushed to."""

    def close():
        """Close the connection to this peer.

        This is called when the peer will no longer be used. Resources
        associated with the peer should be cleaned up.
        """
76 76
class ipeercapabilities(interfaceutil.Interface):
    """Peer sub-interface related to capabilities."""

    def capable(name):
        """Determine support for a named capability.

        Returns ``False`` if capability not supported.

        Returns ``True`` if boolean capability is supported. Returns a string
        if capability support is non-boolean.

        Capability strings may or may not map to wire protocol capabilities.
        """

    def requirecap(name, purpose):
        """Require a capability to be present.

        Raises a ``CapabilityError`` if the capability isn't present.
        """
96 96
class ipeercommands(interfaceutil.Interface):
    """Client-side interface for communicating over the wire protocol.

    This interface is used as a gateway to the Mercurial wire protocol.
    Methods commonly call wire protocol commands of the same name.
    """

    def branchmap():
        """Obtain heads in named branches.

        Returns a dict mapping branch name to an iterable of nodes that are
        heads on that branch.
        """

    def capabilities():
        """Obtain capabilities of the peer.

        Returns a set of string capabilities.
        """

    def clonebundles():
        """Obtains the clone bundles manifest for the repo.

        Returns the manifest as unparsed bytes.
        """

    def debugwireargs(one, two, three=None, four=None, five=None):
        """Used to facilitate debugging of arguments passed over the wire."""

    def getbundle(source, **kwargs):
        """Obtain remote repository data as a bundle.

        This command is how the bulk of repository data is transferred from
        the peer to the local repository

        Returns a generator of bundle data.
        """

    def heads():
        """Determine all known head revisions in the peer.

        Returns an iterable of binary nodes.
        """

    def known(nodes):
        """Determine whether multiple nodes are known.

        Accepts an iterable of nodes whose presence to check for.

        Returns an iterable of booleans indicating if the corresponding node
        at that index is known to the peer.
        """

    def listkeys(namespace):
        """Obtain all keys in a pushkey namespace.

        Returns an iterable of key names.
        """

    def lookup(key):
        """Resolve a value to a known revision.

        Returns a binary node of the resolved revision on success.
        """

    def pushkey(namespace, key, old, new):
        """Set a value using the ``pushkey`` protocol.

        Arguments correspond to the pushkey namespace and key to operate on and
        the old and new values for that key.

        Returns a string with the peer result. The value inside varies by the
        namespace.
        """

    def stream_out():
        """Obtain streaming clone data.

        Successful result should be a generator of data chunks.
        """

    def unbundle(bundle, heads, url):
        """Transfer repository data to the peer.

        This is how the bulk of data during a push is transferred.

        Returns the integer number of heads added to the peer.
        """
185 185
class ipeerlegacycommands(interfaceutil.Interface):
    """Interface for implementing support for legacy wire protocol commands.

    Wire protocol commands transition to legacy status when they are no longer
    used by modern clients. To facilitate identifying which commands are
    legacy, the interfaces are split.
    """

    def between(pairs):
        """Obtain nodes between pairs of nodes.

        ``pairs`` is an iterable of node pairs.

        Returns an iterable of iterables of nodes corresponding to each
        requested pair.
        """

    def branches(nodes):
        """Obtain ancestor changesets of specific nodes back to a branch point.

        For each requested node, the peer finds the first ancestor node that is
        a DAG root or is a merge.

        Returns an iterable of iterables with the resolved values for each node.
        """

    def changegroup(nodes, source):
        """Obtain a changegroup with data for descendants of specified nodes."""

    def changegroupsubset(bases, heads, source):
        """Obtain a changegroup with data for a subset of the DAG.

        Like ``changegroup()`` but bounded by ``bases`` and ``heads``.
        """
217 217
class ipeercommandexecutor(interfaceutil.Interface):
    """Represents a mechanism to execute remote commands.

    This is the primary interface for requesting that wire protocol commands
    be executed. Instances of this interface are active in a context manager
    and have a well-defined lifetime. When the context manager exits, all
    outstanding requests are waited on.
    """

    def callcommand(name, args):
        """Request that a named command be executed.

        Receives the command name and a dictionary of command arguments.

        Returns a ``concurrent.futures.Future`` that will resolve to the
        result of that command request. That exact value is left up to
        the implementation and possibly varies by command.

        Not all commands can coexist with other commands in an executor
        instance: it depends on the underlying wire protocol transport being
        used and the command itself.

        Implementations MAY call ``sendcommands()`` automatically if the
        requested command can not coexist with other commands in this executor.

        Implementations MAY call ``sendcommands()`` automatically when the
        future's ``result()`` is called. So, consumers using multiple
        commands with an executor MUST ensure that ``result()`` is not called
        until all command requests have been issued.
        """

    def sendcommands():
        """Trigger submission of queued command requests.

        Not all transports submit commands as soon as they are requested to
        run. When called, this method forces queued command requests to be
        issued. It will no-op if all commands have already been sent.

        When called, no more new commands may be issued with this executor.
        """

    def close():
        """Signal that this command request is finished.

        When called, no more new commands may be issued. All outstanding
        commands that have previously been issued are waited on before
        returning. This not only includes waiting for the futures to resolve,
        but also waiting for all response data to arrive. In other words,
        calling this waits for all on-wire state for issued command requests
        to finish.

        When used as a context manager, this method is called when exiting the
        context manager.

        This method may call ``sendcommands()`` if there are buffered commands.
        """
274 274
class ipeerrequests(interfaceutil.Interface):
    """Interface for executing commands on a peer."""

    def commandexecutor():
        """A context manager that resolves to an ipeercommandexecutor.

        The object this resolves to can be used to issue command requests
        to the peer.

        Callers should call its ``callcommand`` method to issue command
        requests.

        A new executor should be obtained for each distinct set of commands
        (possibly just a single command) that the consumer wants to execute
        as part of a single operation or round trip. This is because some
        peers are half-duplex and/or don't support persistent connections.
        e.g. in the case of HTTP peers, commands sent to an executor represent
        a single HTTP request. While some peers may support multiple command
        sends over the wire per executor, consumers need to code to the least
        capable peer. So it should be assumed that command executors buffer
        called commands until they are told to send them and that each
        command executor could result in a new connection or wire-level request
        being issued.
        """
299 299
class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
    """Unified interface for peer repositories.

    All peer instances must conform to this interface.
    """
305 305
@interfaceutil.implementer(ipeerbase)
class peer(object):
    """Base class for peer repositories."""

    def capable(self, name):
        """Determine support for capability ``name``.

        Returns ``True`` for a boolean capability, the string value following
        ``name=`` for a parameterized capability, and ``False`` otherwise.
        """
        caps = self.capabilities()
        if name in caps:
            return True

        prefix = '%s=' % name
        for cap in caps:
            if cap.startswith(prefix):
                return cap[len(prefix):]

        return False

    def requirecap(self, name, purpose):
        """Raise ``error.CapabilityError`` unless ``name`` is supported."""
        if not self.capable(name):
            raise error.CapabilityError(
                _('cannot %s; remote repository does not support the %r '
                  'capability') % (purpose, name))
329 329
class iverifyproblem(interfaceutil.Interface):
    """Represents a problem with the integrity of the repository.

    Instances of this interface are emitted to describe an integrity issue
    with a repository (e.g. corrupt storage, missing data, etc).

    Instances are essentially messages associated with severity.
    """
    warning = interfaceutil.Attribute(
        """Message indicating a non-fatal problem.""")

    error = interfaceutil.Attribute(
        """Message indicating a fatal problem.""")
343 343
class irevisiondelta(interfaceutil.Interface):
    """Represents a delta between one revision and another.

    Instances convey enough information to allow a revision to be exchanged
    with another repository.

    Instances represent the fulltext revision data or a delta against
    another revision. Therefore the ``revision`` and ``delta`` attributes
    are mutually exclusive.

    Typically used for changegroup generation.
    """

    node = interfaceutil.Attribute(
        """20 byte node of this revision.""")

    p1node = interfaceutil.Attribute(
        """20 byte node of 1st parent of this revision.""")

    p2node = interfaceutil.Attribute(
        """20 byte node of 2nd parent of this revision.""")

    linknode = interfaceutil.Attribute(
        """20 byte node of the changelog revision this node is linked to.""")

    flags = interfaceutil.Attribute(
        """2 bytes of integer flags that apply to this revision.""")

    basenode = interfaceutil.Attribute(
        """20 byte node of the revision this data is a delta against.

        ``nullid`` indicates that the revision is a full revision and not
        a delta.
        """)

    baserevisionsize = interfaceutil.Attribute(
        """Size of base revision this delta is against.

        May be ``None`` if ``basenode`` is ``nullid``.
        """)

    revision = interfaceutil.Attribute(
        """Raw fulltext of revision data for this node.""")

    delta = interfaceutil.Attribute(
        """Delta between ``basenode`` and ``node``.

        Stored in the bdiff delta format.
        """)
393 393
class irevisiondeltarequest(interfaceutil.Interface):
    """Represents a request to generate an ``irevisiondelta``."""

    node = interfaceutil.Attribute(
        """20 byte node of revision being requested.""")

    p1node = interfaceutil.Attribute(
        """20 byte node of 1st parent of revision.""")

    p2node = interfaceutil.Attribute(
        """20 byte node of 2nd parent of revision.""")

    linknode = interfaceutil.Attribute(
        """20 byte node to store in ``linknode`` attribute.""")

    basenode = interfaceutil.Attribute(
        """Base revision that delta should be generated against.

        If ``nullid``, the derived ``irevisiondelta`` should have its
        ``revision`` field populated and no delta should be generated.

        If ``None``, the delta may be generated against any revision that
        is an ancestor of this revision. Or a full revision may be used.

        If any other value, the delta should be produced against that
        revision.
        """)

    ellipsis = interfaceutil.Attribute(
        """Boolean on whether the ellipsis flag should be set.""")
424 424
class ifilerevisionssequence(interfaceutil.Interface):
    """Contains index data for all revisions of a file.

    Types implementing this behave like lists of tuples. The index
    in the list corresponds to the revision number. The values contain
    index metadata.

    The *null* revision (revision number -1) is always the last item
    in the index.
    """

    def __len__():
        """The total number of revisions."""

    def __getitem__(rev):
        """Returns the object having a specific revision number.

        Returns an 8-tuple with the following fields:

        offset+flags
           Contains the offset and flags for the revision. 64-bit unsigned
           integer where first 6 bytes are the offset and the next 2 bytes
           are flags. The offset can be 0 if it is not used by the store.
        compressed size
           Size of the revision data in the store. It can be 0 if it isn't
           needed by the store.
        uncompressed size
           Fulltext size. It can be 0 if it isn't needed by the store.
        base revision
           Revision number of revision the delta for storage is encoded
           against. -1 indicates not encoded against a base revision.
        link revision
           Revision number of changelog revision this entry is related to.
        p1 revision
           Revision number of 1st parent. -1 if no 1st parent.
        p2 revision
           Revision number of 2nd parent. -1 if no 2nd parent.
        node
           Binary node value for this revision number.

        Negative values should index off the end of the sequence. ``-1``
        should return the null revision. ``-2`` should return the most
        recent revision.
        """

    def __contains__(rev):
        """Whether a revision number exists."""

    # NOTE(review): unlike the other interface method declarations in this
    # module, this one carries an explicit ``self`` — confirm intent.
    def insert(self, i, entry):
        """Add an item to the index at specific revision."""
475 475
class ifileindex(interfaceutil.Interface):
    """Storage interface for index data of a single file.

    File storage data is divided into index metadata and data storage.
    This interface defines the index portion of the interface.

    The index logically consists of:

    * A mapping between revision numbers and nodes.
    * DAG data (storing and querying the relationship between nodes).
    * Metadata to facilitate storage.
    """
    def __len__():
        """Obtain the number of revisions stored for this file."""

    def __iter__():
        """Iterate over revision numbers for this file."""

    def revs(start=0, stop=None):
        """Iterate over revision numbers for this file, with control."""

    def parents(node):
        """Returns a 2-tuple of parent nodes for a revision.

        Values will be ``nullid`` if the parent is empty.
        """

    def parentrevs(rev):
        """Like parents() but operates on revision numbers."""

    def rev(node):
        """Obtain the revision number given a node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def node(rev):
        """Obtain the node value given a revision number.

        Raises ``IndexError`` if the node is not known.
        """

    def lookup(node):
        """Attempt to resolve a value to a node.

        Value can be a binary node, hex node, revision number, or a string
        that can be converted to an integer.

        Raises ``error.LookupError`` if a node could not be resolved.
        """

    def linkrev(rev):
        """Obtain the changeset revision number a revision is linked to."""

    def flags(rev):
        """Obtain flags used to affect storage of a revision."""

    def iscensored(rev):
        """Return whether a revision's content has been censored."""

    def commonancestorsheads(node1, node2):
        """Obtain an iterable of nodes containing heads of common ancestors.

        See ``ancestor.commonancestorsheads()``.
        """

    def descendants(revs):
        """Obtain descendant revision numbers for a set of revision numbers.

        If ``nullrev`` is in the set, this is equivalent to ``revs()``.
        """

    def heads(start=None, stop=None):
        """Obtain a list of nodes that are DAG heads, with control.

        The set of revisions examined can be limited by specifying
        ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
        iterable of nodes. DAG traversal starts at earlier revision
        ``start`` and iterates forward until any node in ``stop`` is
        encountered.
        """

    def children(node):
        """Obtain nodes that are children of a node.

        Returns a list of nodes.
        """

    def deltaparent(rev):
        """Return the revision that is a suitable parent to delta against."""
569 566
class ifiledata(interfaceutil.Interface):
    """Storage interface for data storage of a specific file.

    This complements ``ifileindex`` and provides an interface for accessing
    data for a tracked file.
    """
    def rawsize(rev):
        """The size of the fulltext data for a revision as stored."""

    def size(rev):
        """Obtain the fulltext size of file data.

        Any metadata is excluded from size measurements. Use ``rawsize()`` if
        metadata size is important.
        """

    def checkhash(fulltext, node, p1=None, p2=None, rev=None):
        """Validate the stored hash of a given fulltext and node.

        Raises ``error.StorageError`` if hash validation fails.
        """

    def revision(node, raw=False):
        """Obtain fulltext data for a node.

        By default, any storage transformations are applied before the data
        is returned. If ``raw`` is True, non-raw storage transformations
        are not applied.

        The fulltext data may contain a header containing metadata. Most
        consumers should use ``read()`` to obtain the actual file data.
        """

    def read(node):
        """Resolve file fulltext data.

        This is similar to ``revision()`` except any metadata in the data
        headers is stripped.
        """

    def renamed(node):
        """Obtain copy metadata for a node.

        Returns ``False`` if no copy metadata is stored or a 2-tuple of
        (path, node) from which this revision was copied.
        """

    def cmp(node, fulltext):
        """Compare fulltext to another revision.

        Returns True if the fulltext is different from what is stored.

        This takes copy metadata into account.

        TODO better document the copy metadata and censoring logic.
        """

    def revdiff(rev1, rev2):
        """Obtain a delta between two revision numbers.

        Operates on raw data in the store (``revision(node, raw=True)``).

        The returned data is the result of ``bdiff.bdiff`` on the raw
        revision data.
        """

    def emitrevisiondeltas(requests):
        """Produce ``irevisiondelta`` from ``irevisiondeltarequest``s.

        Given an iterable of objects conforming to the ``irevisiondeltarequest``
        interface, emits objects conforming to the ``irevisiondelta``
        interface.

        This method is a generator.

        ``irevisiondelta`` should be emitted in the same order of
        ``irevisiondeltarequest`` that was passed in.

        The emitted objects MUST conform by the results of
        ``irevisiondeltarequest``. Namely, they must respect any requests
        for building a delta from a specific ``basenode`` if defined.

        When sending deltas, implementations must take into account whether
        the client has the base delta before encoding a delta against that
        revision. A revision encountered previously in ``requests`` is
        always a suitable base revision. An example of a bad delta is a delta
        against a non-ancestor revision. Another example of a bad delta is a
        delta against a censored revision.
        """
659 656
class ifilemutation(interfaceutil.Interface):
    """Storage interface for mutation events of a tracked file."""

    def add(filedata, meta, transaction, linkrev, p1, p2):
        """Add a new revision to the store.

        Takes file data, dictionary of metadata, a transaction, linkrev,
        and parent nodes.

        Returns the node that was added.

        May no-op if a revision matching the supplied data is already stored.
        """

    def addrevision(revisiondata, transaction, linkrev, p1, p2, node=None,
                    flags=0, cachedelta=None):
        """Add a new revision to the store.

        This is similar to ``add()`` except it operates at a lower level.

        The data passed in already contains a metadata header, if any.

        ``node`` and ``flags`` can be used to define the expected node and
        the flags to use with storage.

        ``add()`` is usually called when adding files from e.g. the working
        directory. ``addrevision()`` is often called by ``add()`` and for
        scenarios where revision data has already been computed, such as when
        applying raw data from a peer repo.
        """

    def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
        """Process a series of deltas for storage.

        ``deltas`` is an iterable of 7-tuples of
        (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
        to add.

        The ``delta`` field contains ``mpatch`` data to apply to a base
        revision, identified by ``deltabase``. The base node can be
        ``nullid``, in which case the header from the delta can be ignored
        and the delta used as the fulltext.

        ``addrevisioncb`` should be called for each node as it is committed.

        Returns a list of nodes that were processed. A node will be in the list
        even if it existed in the store previously.
        """

    def censorrevision(tr, node, tombstone=b''):
        """Remove the content of a single revision.

        The specified ``node`` will have its content purged from storage.
        Future attempts to access the revision data for this node will
        result in failure.

        A ``tombstone`` message can optionally be stored. This message may be
        displayed to users when they attempt to access the missing revision
        data.

        Storage backends may have stored deltas against the previous content
        in this revision. As part of censoring a revision, these storage
        backends are expected to rewrite any internally stored deltas such
        that they no longer reference the deleted content.
        """

    def getstrippoint(minlink):
        """Find the minimum revision that must be stripped to strip a linkrev.

        Returns a 2-tuple containing the minimum revision number and a set
        of all revisions numbers that would be broken by this strip.

        TODO this is highly revlog centric and should be abstracted into
        a higher-level deletion API. ``repair.strip()`` relies on this.
        """

    def strip(minlink, transaction):
        """Remove storage of items starting at a linkrev.

        This uses ``getstrippoint()`` to determine the first node to remove.
        Then it effectively truncates storage for all revisions after that.

        TODO this is highly revlog centric and should be abstracted into a
        higher-level deletion API.
        """
745 742
class ifilestorage(ifileindex, ifiledata, ifilemutation):
    """Complete storage interface for a single tracked file."""

    _generaldelta = interfaceutil.Attribute(
        """Whether deltas can be against any parent revision.

        TODO this is used by changegroup code and it could probably be
        folded into another API.
        """)

    def files():
        """Obtain paths that are backing storage for this file.

        TODO this is used heavily by verify code and there should probably
        be a better API for that.
        """

    def verifyintegrity(state):
        """Verifies the integrity of file storage.

        ``state`` is a dict holding state of the verifier process. It can be
        used to communicate data between invocations of multiple storage
        primitives.

        The method yields objects conforming to the ``iverifyproblem``
        interface.
        """
773 770
class idirs(interfaceutil.Interface):
    """Interface representing a collection of directories from paths.

    This interface is essentially a derived data structure representing
    directories from a collection of paths.
    """

    def addpath(path):
        """Add a path to the collection.

        All directories in the path will be added to the collection.
        """

    def delpath(path):
        """Remove a path from the collection.

        If the removal was the last path in a particular directory, the
        directory is removed from the collection.
        """

    def __iter__():
        """Iterate over the directories in this collection of paths."""

    def __contains__(path):
        """Whether a specific directory is in this collection."""
799 796
class imanifestdict(interfaceutil.Interface):
    """Interface representing a manifest data structure.

    A manifest is effectively a dict mapping paths to entries. Each entry
    consists of a binary node and extra flags affecting that entry.
    """

    def __getitem__(path):
        """Returns the binary node value for a path in the manifest.

        Raises ``KeyError`` if the path does not exist in the manifest.

        Equivalent to ``self.find(path)[0]``.
        """

    def find(path):
        """Returns the entry for a path in the manifest.

        Returns a 2-tuple of (node, flags).

        Raises ``KeyError`` if the path does not exist in the manifest.
        """

    def __len__():
        """Return the number of entries in the manifest."""

    def __nonzero__():
        """Returns True if the manifest has entries, False otherwise."""

    __bool__ = __nonzero__

    def __setitem__(path, node):
        """Define the node value for a path in the manifest.

        If the path is already in the manifest, its flags will be copied to
        the new entry.
        """

    def __contains__(path):
        """Whether a path exists in the manifest."""

    def __delitem__(path):
        """Remove a path from the manifest.

        Raises ``KeyError`` if the path is not in the manifest.
        """

    def __iter__():
        """Iterate over paths in the manifest."""

    def iterkeys():
        """Iterate over paths in the manifest."""

    def keys():
        """Obtain a list of paths in the manifest."""

    def filesnotin(other, match=None):
        """Obtain the set of paths in this manifest but not in another.

        ``match`` is an optional matcher function to be applied to both
        manifests.

        Returns a set of paths.
        """

    def dirs():
        """Returns an object implementing the ``idirs`` interface."""

    def hasdir(dir):
        """Returns a bool indicating if a directory is in this manifest."""

    def matches(match):
        """Generate a new manifest filtered through a matcher.

        Returns an object conforming to the ``imanifestdict`` interface.
        """

    def walk(match):
        """Generator of paths in manifest satisfying a matcher.

        This is equivalent to ``self.matches(match).iterkeys()`` except a new
        manifest object is not created.

        If the matcher has explicit files listed and they don't exist in
        the manifest, ``match.bad()`` is called for each missing file.
        """

    def diff(other, match=None, clean=False):
        """Find differences between this manifest and another.

        This manifest is compared to ``other``.

        If ``match`` is provided, the two manifests are filtered against this
        matcher and only entries satisfying the matcher are compared.

        If ``clean`` is True, unchanged files are included in the returned
        object.

        Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
        the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
        represents the node and flags for this manifest and ``(node2, flag2)``
        are the same for the other manifest.
        """

    def setflag(path, flag):
        """Set the flag value for a given path.

        Raises ``KeyError`` if the path is not already in the manifest.
        """

    def get(path, default=None):
        """Obtain the node value for a path or a default value if missing."""

    def flags(path, default=''):
        """Return the flags value for a path or a default value if missing."""

    def copy():
        """Return a copy of this manifest."""

    def items():
        """Returns an iterable of (path, node) for items in this manifest."""

    def iteritems():
        """Identical to items()."""

    def iterentries():
        """Returns an iterable of (path, node, flags) for this manifest.

        Similar to ``iteritems()`` except items are a 3-tuple and include
        flags.
        """

    def text():
        """Obtain the raw data representation for this manifest.

        Result is used to create a manifest revision.
        """

    def fastdelta(base, changes):
        """Obtain a delta between this manifest and another given changes.

        ``base`` is the raw data representation for another manifest.

        ``changes`` is an iterable of ``(path, to_delete)``.

        Returns a 2-tuple containing ``bytearray(self.text())`` and the
        delta between ``base`` and this manifest.
        """
948 945
class imanifestrevisionbase(interfaceutil.Interface):
    """Base interface representing a single revision of a manifest.

    Should not be used as a primary interface: should always be inherited
    as part of a larger interface.
    """

    def new():
        """Obtain a new manifest instance.

        Returns an object conforming to the ``imanifestrevisionwritable``
        interface. The instance will be associated with the same
        ``imanifestlog`` collection as this instance.
        """

    def copy():
        """Obtain a copy of this manifest instance.

        Returns an object conforming to the ``imanifestrevisionwritable``
        interface. The instance will be associated with the same
        ``imanifestlog`` collection as this instance.
        """

    def read():
        """Obtain the parsed manifest data structure.

        The returned object conforms to the ``imanifestdict`` interface.
        """
977 974
class imanifestrevisionstored(imanifestrevisionbase):
    """Interface representing a manifest revision committed to storage."""

    def node():
        """The binary node for this manifest."""

    parents = interfaceutil.Attribute(
        """List of binary nodes that are parents for this manifest revision."""
    )

    def readdelta(shallow=False):
        """Obtain the manifest data structure representing changes from parent.

        This manifest is compared to its 1st parent. A new manifest representing
        those differences is constructed.

        The returned object conforms to the ``imanifestdict`` interface.
        """

    def readfast(shallow=False):
        """Calls either ``read()`` or ``readdelta()``.

        The faster of the two options is called.
        """

    def find(key):
        """Calls ``self.read().find(key)``.

        Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
        """
1008 1005
class imanifestrevisionwritable(imanifestrevisionbase):
    """Interface representing a manifest revision that can be committed."""

    def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
        """Add this revision to storage.

        Takes a transaction object, the changeset revision number it will
        be associated with, its parent nodes, and lists of added and
        removed paths.

        If match is provided, storage can choose not to inspect or write out
        items that do not match. Storage is still required to be able to provide
        the full manifest in the future for any directories written (these
        manifests should not be "narrowed on disk").

        Returns the binary node of the created revision.
        """
1026 1023
class imanifeststorage(interfaceutil.Interface):
    """Storage interface for manifest data."""

    tree = interfaceutil.Attribute(
        """The path to the directory this manifest tracks.

        The empty bytestring represents the root manifest.
        """)

    index = interfaceutil.Attribute(
        """An ``ifilerevisionssequence`` instance.""")

    indexfile = interfaceutil.Attribute(
        """Path of revlog index file.

        TODO this is revlog specific and should not be exposed.
        """)

    opener = interfaceutil.Attribute(
        """VFS opener to use to access underlying files used for storage.

        TODO this is revlog specific and should not be exposed.
        """)

    version = interfaceutil.Attribute(
        """Revlog version number.

        TODO this is revlog specific and should not be exposed.
        """)

    _generaldelta = interfaceutil.Attribute(
        """Whether generaldelta storage is being used.

        TODO this is revlog specific and should not be exposed.
        """)

    fulltextcache = interfaceutil.Attribute(
        """Dict with cache of fulltexts.

        TODO this doesn't feel appropriate for the storage interface.
        """)

    def __len__():
        """Obtain the number of revisions stored for this manifest."""

    def __iter__():
        """Iterate over revision numbers for this manifest."""

    def rev(node):
        """Obtain the revision number given a binary node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def node(rev):
        """Obtain the node value given a revision number.

        Raises ``error.LookupError`` if the revision is not known.
        """

    def lookup(value):
        """Attempt to resolve a value to a node.

        Value can be a binary node, hex node, revision number, or a bytes
        that can be converted to an integer.

        Raises ``error.LookupError`` if a node could not be resolved.

        TODO this is only used by debug* commands and can probably be deleted
        easily.
        """

    def parents(node):
        """Returns a 2-tuple of parent nodes for a node.

        Values will be ``nullid`` if the parent is empty.
        """

    def parentrevs(rev):
        """Like parents() but operates on revision numbers."""

    def linkrev(rev):
        """Obtain the changeset revision number a revision is linked to."""

    def revision(node, _df=None, raw=False):
        """Obtain fulltext data for a node."""

    def revdiff(rev1, rev2):
        """Obtain a delta between two revision numbers.

        The returned data is the result of ``bdiff.bdiff()`` on the raw
        revision data.
        """

    def cmp(node, fulltext):
        """Compare fulltext to another revision.

        Returns True if the fulltext is different from what is stored.
        """

    def emitrevisiondeltas(requests):
        """Produce ``irevisiondelta`` from ``irevisiondeltarequest``s.

        See the documentation for ``ifiledata`` for more.
        """

    def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
        """Process a series of deltas for storage.

        See the documentation in ``ifilemutation`` for more.
        """

    def rawsize(rev):
        """Obtain the size of tracked data.

        Is equivalent to ``len(m.revision(node, raw=True))``.

        TODO this method is only used by upgrade code and may be removed.
        """

    def getstrippoint(minlink):
        """Find minimum revision that must be stripped to strip a linkrev.

        See the documentation in ``ifilemutation`` for more.
        """

    def strip(minlink, transaction):
        """Remove storage of items starting at a linkrev.

        See the documentation in ``ifilemutation`` for more.
        """

    def checksize():
        """Obtain the expected sizes of backing files.

        TODO this is used by verify and it should not be part of the interface.
        """

    def files():
        """Obtain paths that are backing storage for this manifest.

        TODO this is used by verify and there should probably be a better API
        for this functionality.
        """

    def deltaparent(rev):
        """Obtain the revision that a revision is delta'd against.

        TODO delta encoding is an implementation detail of storage and should
        not be exposed to the storage interface.
        """

    def clone(tr, dest, **kwargs):
        """Clone this instance to another."""

    def clearcaches(clear_persisted_data=False):
        """Clear any caches associated with this instance."""

    def dirlog(d):
        """Obtain a manifest storage instance for a tree."""

    def add(m, transaction, link, p1, p2, added, removed, readtree=None,
            match=None):
        """Add a revision to storage.

        ``m`` is an object conforming to ``imanifestdict``.

        ``link`` is the linkrev revision number.

        ``p1`` and ``p2`` are the parent revision numbers.

        ``added`` and ``removed`` are iterables of added and removed paths,
        respectively.

        ``readtree`` is a function that can be used to read the child tree(s)
        when recursively writing the full tree structure when using
        treemanifests.

        ``match`` is a matcher that can be used to hint to storage that not all
        paths must be inspected; this is an optimization and can be safely
        ignored. Note that the storage must still be able to reproduce a full
        manifest including files that did not match.
        """
1210 1207
class imanifestlog(interfaceutil.Interface):
    """Interface representing a collection of manifest snapshots.

    Represents the root manifest in a repository.

    Also serves as a means to access nested tree manifests and to cache
    tree manifests.
    """

    def __getitem__(node):
        """Obtain a manifest instance for a given binary node.

        Equivalent to calling ``self.get('', node)``.

        The returned object conforms to the ``imanifestrevisionstored``
        interface.
        """

    def get(tree, node, verify=True):
        """Retrieve the manifest instance for a given directory and binary node.

        ``node`` always refers to the node of the root manifest (which will be
        the only manifest if flat manifests are being used).

        If ``tree`` is the empty string, the root manifest is returned.
        Otherwise the manifest for the specified directory will be returned
        (requires tree manifests).

        If ``verify`` is True, ``LookupError`` is raised if the node is not
        known.

        The returned object conforms to the ``imanifestrevisionstored``
        interface.
        """

    def getstorage(tree):
        """Retrieve an interface to storage for a particular tree.

        If ``tree`` is the empty bytestring, storage for the root manifest will
        be returned. Otherwise storage for a tree manifest is returned.

        TODO formalize interface for returned object.
        """

    def clearcaches():
        """Clear caches associated with this collection."""

    def rev(node):
        """Obtain the revision number for a binary node.

        Raises ``error.LookupError`` if the node is not known.
        """
1263 1260
class ilocalrepositoryfilestorage(interfaceutil.Interface):
    """Local repository sub-interface providing access to tracked file storage.

    This interface defines how a repository accesses storage for a single
    tracked file path.
    """

    def file(f):
        """Obtain a filelog for a tracked path.

        The returned type conforms to the ``ifilestorage`` interface.
        """
1276 1273
class ilocalrepositorymain(interfaceutil.Interface):
    """Main interface for local repositories.

    This currently captures the reality of things - not how things should be.
    """

    supportedformats = interfaceutil.Attribute(
        """Set of requirements that apply to stream clone.

        This is actually a class attribute and is shared among all instances.
        """)

    supported = interfaceutil.Attribute(
        """Set of requirements that this repo is capable of opening.""")

    requirements = interfaceutil.Attribute(
        """Set of requirements this repo uses.""")

    features = interfaceutil.Attribute(
        """Set of "features" this repository supports.

        A "feature" is a loosely-defined term. It can refer to a feature
        in the classical sense or can describe an implementation detail
        of the repository. For example, a ``readonly`` feature may denote
        the repository as read-only. Or a ``revlogfilestore`` feature may
        denote that the repository is using revlogs for file storage.

        The intent of features is to provide a machine-queryable mechanism
        for repo consumers to test for various repository characteristics.

        Features are similar to ``requirements``. The main difference is that
        requirements are stored on-disk and represent requirements to open the
        repository. Features are more run-time capabilities of the repository
        and more granular capabilities (which may be derived from requirements).
        """)

    filtername = interfaceutil.Attribute(
        """Name of the repoview that is active on this repo.""")

    wvfs = interfaceutil.Attribute(
        """VFS used to access the working directory.""")

    vfs = interfaceutil.Attribute(
        """VFS rooted at the .hg directory.

        Used to access repository data not in the store.
        """)

    svfs = interfaceutil.Attribute(
        """VFS rooted at the store.

        Used to access repository data in the store. Typically .hg/store.
        But can point elsewhere if the store is shared.
        """)

    root = interfaceutil.Attribute(
        """Path to the root of the working directory.""")

    path = interfaceutil.Attribute(
        """Path to the .hg directory.""")

    origroot = interfaceutil.Attribute(
        """The filesystem path that was used to construct the repo.""")

    auditor = interfaceutil.Attribute(
        """A pathauditor for the working directory.

        This checks if a path refers to a nested repository.

        Operates on the filesystem.
        """)

    nofsauditor = interfaceutil.Attribute(
        """A pathauditor for the working directory.

        This is like ``auditor`` except it doesn't do filesystem checks.
        """)

    baseui = interfaceutil.Attribute(
        """Original ui instance passed into constructor.""")

    ui = interfaceutil.Attribute(
        """Main ui instance for this instance.""")

    sharedpath = interfaceutil.Attribute(
        """Path to the .hg directory of the repo this repo was shared from.""")

    store = interfaceutil.Attribute(
        """A store instance.""")

    spath = interfaceutil.Attribute(
        """Path to the store.""")

    sjoin = interfaceutil.Attribute(
        """Alias to self.store.join.""")

    cachevfs = interfaceutil.Attribute(
        """A VFS used to access the cache directory.

        Typically .hg/cache.
        """)

    filteredrevcache = interfaceutil.Attribute(
        """Holds sets of revisions to be filtered.""")

    names = interfaceutil.Attribute(
        """A ``namespaces`` instance.""")

    def close():
        """Close the handle on this repository."""

    def peer():
        """Obtain an object conforming to the ``peer`` interface."""

    def unfiltered():
        """Obtain an unfiltered/raw view of this repo."""

    def filtered(name, visibilityexceptions=None):
        """Obtain a named view of this repository."""

    obsstore = interfaceutil.Attribute(
        """A store of obsolescence data.""")

    changelog = interfaceutil.Attribute(
        """A handle on the changelog revlog.""")

    manifestlog = interfaceutil.Attribute(
        """An instance conforming to the ``imanifestlog`` interface.

        Provides access to manifests for the repository.
        """)

    dirstate = interfaceutil.Attribute(
        """Working directory state.""")

    narrowpats = interfaceutil.Attribute(
        """Matcher patterns for this repository's narrowspec.""")

    def narrowmatch():
        """Obtain a matcher for the narrowspec."""

    def setnarrowpats(newincludes, newexcludes):
        """Define the narrowspec for this repository."""

    def __getitem__(changeid):
        """Try to resolve a changectx."""

    def __contains__(changeid):
        """Whether a changeset exists."""

    def __nonzero__():
        """Always returns True."""
        return True

    __bool__ = __nonzero__

    def __len__():
        """Returns the number of changesets in the repo."""

    def __iter__():
        """Iterate over revisions in the changelog."""

    def revs(expr, *args):
        """Evaluate a revset.

        Emits revisions.
        """

    def set(expr, *args):
        """Evaluate a revset.

        Emits changectx instances.
        """

    def anyrevs(specs, user=False, localalias=None):
        """Find revisions matching one of the given revsets."""

    def url():
        """Returns a string representing the location of this repo."""

    def hook(name, throw=False, **args):
        """Call a hook."""

    def tags():
        """Return a mapping of tag to node."""

    def tagtype(tagname):
        """Return the type of a given tag."""

    def tagslist():
        """Return a list of tags ordered by revision."""

    def nodetags(node):
        """Return the tags associated with a node."""

    def nodebookmarks(node):
        """Return the list of bookmarks pointing to the specified node."""

    def branchmap():
        """Return a mapping of branch to heads in that branch."""

    def revbranchcache():
        # NOTE(review): undocumented upstream; presumably returns the
        # revision-to-branch-name cache - confirm against localrepo.
        pass

    def branchtip(branchtip, ignoremissing=False):
        """Return the tip node for a given branch."""

    def lookup(key):
        """Resolve the node for a revision."""

    def lookupbranch(key):
        """Look up the branch name of the given revision or branch name."""

    def known(nodes):
        """Determine whether a series of nodes is known.

        Returns a list of bools.
        """

    def local():
        """Whether the repository is local."""
        return True

    def publishing():
        """Whether the repository is a publishing repository."""

    def cancopy():
        # NOTE(review): undocumented upstream - presumably whether the repo
        # can be cloned by copying files; confirm semantics.
        pass

    def shared():
        """The type of shared repository or None."""

    def wjoin(f, *insidef):
        """Calls self.vfs.reljoin(self.root, f, *insidef)"""

    def setparents(p1, p2):
        """Set the parent nodes of the working directory."""

    def filectx(path, changeid=None, fileid=None):
        """Obtain a filectx for the given file revision."""

    def getcwd():
        """Obtain the current working directory from the dirstate."""

    def pathto(f, cwd=None):
        """Obtain the relative path to a file."""

    def adddatafilter(name, fltr):
        # NOTE(review): undocumented upstream - presumably registers a data
        # filter under ``name``; confirm semantics.
        pass

    def wread(filename):
        """Read a file from wvfs, using data filters."""

    def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
        """Write data to a file in the wvfs, using data filters."""

    def wwritedata(filename, data):
        """Resolve data for writing to the wvfs, using data filters."""

    def currenttransaction():
        """Obtain the current transaction instance or None."""

    def transaction(desc, report=None):
        """Open a new transaction to write to the repository."""

    def undofiles():
        """Returns a list of (vfs, path) for files to undo transactions."""

    def recover():
        """Roll back an interrupted transaction."""

    def rollback(dryrun=False, force=False):
        """Undo the last transaction.

        DANGEROUS.
        """

    def updatecaches(tr=None, full=False):
        """Warm repo caches."""

    def invalidatecaches():
        """Invalidate cached data due to the repository mutating."""

    def invalidatevolatilesets():
        # NOTE(review): undocumented upstream - confirm semantics.
        pass

    def invalidatedirstate():
        """Invalidate the dirstate."""

    def invalidate(clearfilecache=False):
        # NOTE(review): undocumented upstream - confirm scope relative to
        # ``invalidatecaches()``.
        pass

    def invalidateall():
        # NOTE(review): undocumented upstream - confirm semantics.
        pass

    def lock(wait=True):
        """Lock the repository store and return a lock instance."""

    def wlock(wait=True):
        """Lock the non-store parts of the repository."""

    def currentwlock():
        """Return the wlock if it's held or None."""

    def checkcommitpatterns(wctx, vdirs, match, status, fail):
        # NOTE(review): undocumented upstream - confirm semantics.
        pass

    def commit(text='', user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to the repository."""

    def commitctx(ctx, error=False):
        """Commit a commitctx instance to the repository."""

    def destroying():
        """Inform the repository that nodes are about to be destroyed."""

    def destroyed():
        """Inform the repository that nodes have been destroyed."""

    def status(node1='.', node2=None, match=None, ignored=False,
               clean=False, unknown=False, listsubrepos=False):
        """Convenience method to call repo[x].status()."""

    def addpostdsstatus(ps):
        # NOTE(review): undocumented upstream - confirm semantics.
        pass

    def postdsstatus():
        # NOTE(review): undocumented upstream - confirm semantics.
        pass

    def clearpostdsstatus():
        # NOTE(review): undocumented upstream - confirm semantics.
        pass

    def heads(start=None):
        """Obtain list of nodes that are DAG heads."""

    def branchheads(branch=None, start=None, closed=False):
        # NOTE(review): undocumented upstream - confirm semantics.
        pass

    def branches(nodes):
        # NOTE(review): undocumented upstream - confirm semantics.
        pass

    def between(pairs):
        # NOTE(review): undocumented upstream - confirm semantics.
        pass

    def checkpush(pushop):
        # NOTE(review): undocumented upstream - confirm semantics.
        pass

    prepushoutgoinghooks = interfaceutil.Attribute(
        """util.hooks instance.""")

    def pushkey(namespace, key, old, new):
        # NOTE(review): undocumented upstream - presumably implements the
        # pushkey wire-protocol update; confirm semantics.
        pass

    def listkeys(namespace):
        # NOTE(review): undocumented upstream - presumably lists values in a
        # pushkey namespace; confirm semantics.
        pass

    def debugwireargs(one, two, three=None, four=None, five=None):
        # NOTE(review): undocumented upstream - confirm semantics.
        pass

    def savecommitmessage(text):
        # NOTE(review): undocumented upstream - confirm semantics.
        pass
1639 1636
class completelocalrepository(ilocalrepositorymain,
                              ilocalrepositoryfilestorage):
    """Complete interface for a local repository."""
@@ -1,735 +1,735 b''
1 1 # simplestorerepo.py - Extension that swaps in alternate repository storage.
2 2 #
3 3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 # To use this with the test suite:
9 9 #
10 10 # $ HGREPOFEATURES="simplestore" ./run-tests.py \
11 11 # --extra-config-opt extensions.simplestore=`pwd`/simplestorerepo.py
12 12
13 13 from __future__ import absolute_import
14 14
15 15 import stat
16 16
17 17 from mercurial.i18n import _
18 18 from mercurial.node import (
19 19 bin,
20 20 hex,
21 21 nullid,
22 22 nullrev,
23 23 )
24 24 from mercurial.thirdparty import (
25 25 attr,
26 26 cbor,
27 27 )
28 28 from mercurial import (
29 29 ancestor,
30 30 bundlerepo,
31 31 error,
32 32 extensions,
33 33 localrepo,
34 34 mdiff,
35 35 pycompat,
36 36 repository,
37 37 revlog,
38 38 store,
39 39 verify,
40 40 )
41 41 from mercurial.utils import (
42 42 interfaceutil,
43 43 )
44 44
45 45 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
46 46 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
47 47 # be specifying the version(s) of Mercurial they are tested with, or
48 48 # leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

# Requirement string recorded by repositories using the simple store;
# repos carrying it can only be opened with this extension enabled.
REQUIREMENT = 'testonly-simplestore'
52 52
def validatenode(node):
    """Sanity check that ``node`` looks like a 20-byte binary node.

    Raises ValueError when handed an integer (a revision number was
    probably passed by mistake) or a value of the wrong length.
    """
    if isinstance(node, int):
        raise ValueError('expected node; got int')

    if len(node) == 20:
        return

    raise ValueError('expected 20 byte node')
59 59
def validaterev(rev):
    """Sanity check that ``rev`` is an integer revision number."""
    if isinstance(rev, int):
        return
    raise ValueError('expected int')
63 63
class simplestoreerror(error.StorageError):
    """Storage-layer failure (flag errors, integrity check failures)."""
    pass
66 66
@interfaceutil.implementer(repository.irevisiondelta)
@attr.s(slots=True, frozen=True)
class simplestorerevisiondelta(object):
    # Immutable value object emitted by emitrevisiondeltas(); field
    # meanings follow the repository.irevisiondelta interface.
    node = attr.ib()              # node this delta describes
    p1node = attr.ib()            # first parent node
    p2node = attr.ib()            # second parent node
    basenode = attr.ib()          # node the delta is against
    linknode = attr.ib()          # linked changelog node
    flags = attr.ib()             # revlog-style storage flags
    baserevisionsize = attr.ib()  # size of base revision (censored path)
    revision = attr.ib()          # fulltext, if delta is None
    delta = attr.ib()             # binary delta, if revision is None
79 79
80 80 @interfaceutil.implementer(repository.ifilestorage)
81 81 class filestorage(object):
82 82 """Implements storage for a tracked path.
83 83
84 84 Data is stored in the VFS in a directory corresponding to the tracked
85 85 path.
86 86
87 87 Index data is stored in an ``index`` file using CBOR.
88 88
89 89 Fulltext data is stored in files having names of the node.
90 90 """
91 91
    def __init__(self, svfs, path):
        """Open storage for tracked file ``path``.

        svfs: store vfs used for all reads and writes.
        path: repository-relative path of the tracked file.
        """
        self._svfs = svfs
        self._path = path

        # Everything lives under data/<path>/; revision metadata is a
        # CBOR-encoded list in the ``index`` file, fulltexts in files
        # named after their node.
        self._storepath = b'/'.join([b'data', path])
        self._indexpath = b'/'.join([self._storepath, b'index'])

        indexdata = self._svfs.tryread(self._indexpath)
        if indexdata:
            indexdata = cbor.loads(indexdata)

        self._indexdata = indexdata or []
        self._indexbynode = {}
        self._indexbyrev = {}
        self._index = []
        self._refreshindex()

        # This is used by changegroup code :/
        self._generaldelta = True
111 111
    def _refreshindex(self):
        """Rebuild the in-memory lookup tables from ``_indexdata``.

        Repopulates ``_indexbynode``, ``_indexbyrev`` and the
        revlog-style ``_index`` tuple list; must be called whenever
        ``_indexdata`` changes.
        """
        self._indexbynode.clear()
        self._indexbyrev.clear()
        self._index = []

        for i, entry in enumerate(self._indexdata):
            self._indexbynode[entry[b'node']] = entry
            self._indexbyrev[i] = entry

        # Synthesize entries for the null node/revision so lookups of
        # nullid and nullrev behave like real revisions.
        self._indexbynode[nullid] = {
            b'node': nullid,
            b'p1': nullid,
            b'p2': nullid,
            b'linkrev': nullrev,
            b'flags': 0,
        }

        self._indexbyrev[nullrev] = {
            b'node': nullid,
            b'p1': nullid,
            b'p2': nullid,
            b'linkrev': nullrev,
            b'flags': 0,
        }

        for i, entry in enumerate(self._indexdata):
            p1rev, p2rev = self.parentrevs(self.rev(entry[b'node']))

            # start, length, rawsize, chainbase, linkrev, p1, p2, node
            self._index.append((0, 0, 0, -1, entry[b'linkrev'], p1rev, p2rev,
                                entry[b'node']))

        self._index.append((0, 0, 0, -1, -1, -1, -1, nullid))
145 145
    def __len__(self):
        """Number of stored revisions (the null revision is not counted)."""
        return len(self._indexdata)
148 148
    def __iter__(self):
        """Iterate revision numbers in ascending order."""
        return iter(range(len(self)))
151 151
152 152 def revs(self, start=0, stop=None):
153 153 step = 1
154 154 if stop is not None:
155 155 if start > stop:
156 156 step = -1
157 157
158 158 stop += step
159 159 else:
160 160 stop = len(self)
161 161
162 162 return range(start, stop, step)
163 163
164 164 def parents(self, node):
165 165 validatenode(node)
166 166
167 167 if node not in self._indexbynode:
168 168 raise KeyError('unknown node')
169 169
170 170 entry = self._indexbynode[node]
171 171
172 172 return entry[b'p1'], entry[b'p2']
173 173
174 174 def parentrevs(self, rev):
175 175 p1, p2 = self.parents(self._indexbyrev[rev][b'node'])
176 176 return self.rev(p1), self.rev(p2)
177 177
    def rev(self, node):
        """Map a binary node to its revision number.

        Raises error.LookupError when the node is unknown.
        """
        validatenode(node)

        # Membership probe only; translate a miss into LookupError.
        try:
            self._indexbynode[node]
        except KeyError:
            raise error.LookupError(node, self._indexpath, _('no node'))

        # O(revisions) scan -- fine for a test-only storage backend.
        for rev, entry in self._indexbyrev.items():
            if entry[b'node'] == node:
                return rev

        raise error.ProgrammingError('this should not occur')
191 191
192 192 def node(self, rev):
193 193 validaterev(rev)
194 194
195 195 return self._indexbyrev[rev][b'node']
196 196
    def lookup(self, node):
        """Resolve ``node`` -- a rev number, binary node, decimal string,
        or hex node -- to a binary node.

        Raises error.LookupError when nothing matches.
        """
        if isinstance(node, int):
            return self.node(node)

        if len(node) == 20:
            # Looks like a binary node; verify it exists.
            self.rev(node)
            return node

        try:
            rev = int(node)
            # Reject strings like '01' that aren't canonical integers.
            if '%d' % rev != node:
                raise ValueError

            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError

            return self.node(rev)
        except (ValueError, OverflowError):
            pass

        if len(node) == 40:
            try:
                rawnode = bin(node)
                self.rev(rawnode)
                return rawnode
            except TypeError:
                pass

        raise error.LookupError(node, self._path, _('invalid lookup input'))
228 228
229 229 def linkrev(self, rev):
230 230 validaterev(rev)
231 231
232 232 return self._indexbyrev[rev][b'linkrev']
233 233
234 234 def flags(self, rev):
235 235 validaterev(rev)
236 236
237 237 return self._indexbyrev[rev][b'flags']
238 238
239 239 def deltaparent(self, rev):
240 240 validaterev(rev)
241 241
242 242 p1node = self.parents(self.node(rev))[0]
243 243 return self.rev(p1node)
244 244
245 245 def _candelta(self, baserev, rev):
246 246 validaterev(baserev)
247 247 validaterev(rev)
248 248
249 249 if ((self.flags(baserev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)
250 250 or (self.flags(rev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)):
251 251 return False
252 252
253 253 return True
254 254
255 255 def rawsize(self, rev):
256 256 validaterev(rev)
257 257 node = self.node(rev)
258 258 return len(self.revision(node, raw=True))
259 259
    def _processflags(self, text, flags, operation, raw=False):
        """Run registered revlog flag processors over ``text``.

        ``operation`` is 'read' or 'write'. Returns ``(text,
        validatehash)``; when ``raw`` is True only the raw transform's
        hash validation runs and the text is left untouched.
        Raises simplestoreerror for unknown flags or missing processors.
        """
        if flags == 0:
            return text, True

        if flags & ~revlog.REVIDX_KNOWN_FLAGS:
            raise simplestoreerror(_("incompatible revision flag '%#x'") %
                                   (flags & ~revlog.REVIDX_KNOWN_FLAGS))

        validatehash = True
        # Depending on the operation (read or write), the order might be
        # reversed due to non-commutative transforms.
        orderedflags = revlog.REVIDX_FLAGS_ORDER
        if operation == 'write':
            orderedflags = reversed(orderedflags)

        for flag in orderedflags:
            # If a flagprocessor has been registered for a known flag, apply the
            # related operation transform and update result tuple.
            if flag & flags:
                vhash = True

                if flag not in revlog._flagprocessors:
                    message = _("missing processor for flag '%#x'") % (flag)
                    raise simplestoreerror(message)

                processor = revlog._flagprocessors[flag]
                if processor is not None:
                    readtransform, writetransform, rawtransform = processor

                    if raw:
                        vhash = rawtransform(self, text)
                    elif operation == 'read':
                        text, vhash = readtransform(self, text)
                    else:  # write operation
                        text, vhash = writetransform(self, text)
                validatehash = validatehash and vhash

        return text, validatehash
298 298
299 299 def checkhash(self, text, node, p1=None, p2=None, rev=None):
300 300 if p1 is None and p2 is None:
301 301 p1, p2 = self.parents(node)
302 302 if node != revlog.hash(text, p1, p2):
303 303 raise simplestoreerror(_("integrity check failed on %s") %
304 304 self._path)
305 305
    def revision(self, node, raw=False):
        """Return the text of ``node`` after read flag processing.

        ``raw=True`` returns the stored bytes without read transforms.
        The null node yields the empty string.
        """
        validatenode(node)

        if node == nullid:
            return b''

        rev = self.rev(node)
        flags = self.flags(rev)

        # Fulltexts live in files named after their hex node.
        path = b'/'.join([self._storepath, hex(node)])
        rawtext = self._svfs.read(path)

        text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
        if validatehash:
            self.checkhash(text, node, rev=rev)

        return text
323 323
324 324 def read(self, node):
325 325 validatenode(node)
326 326
327 327 revision = self.revision(node)
328 328
329 329 if not revision.startswith(b'\1\n'):
330 330 return revision
331 331
332 332 start = revision.index(b'\1\n', 2)
333 333 return revision[start + 2:]
334 334
335 335 def renamed(self, node):
336 336 validatenode(node)
337 337
338 338 if self.parents(node)[0] != nullid:
339 339 return False
340 340
341 341 fulltext = self.revision(node)
342 342 m = revlog.parsemeta(fulltext)[0]
343 343
344 344 if m and 'copy' in m:
345 345 return m['copy'], bin(m['copyrev'])
346 346
347 347 return False
348 348
    def cmp(self, node, text):
        """Return True when ``text`` differs from what is stored at ``node``.

        Compares by hash rather than by fetching the stored text, with
        special cases for censored and copied revisions.
        """
        validatenode(node)

        t = text

        # Stored texts beginning with the metadata marker are escaped by
        # doubling the marker; mirror that before hashing.
        if text.startswith(b'\1\n'):
            t = b'\1\n\1\n' + text

        p1, p2 = self.parents(node)

        if revlog.hash(t, p1, p2) == node:
            return False

        # Censored content never matches anything but the empty string.
        if self.iscensored(self.rev(node)):
            return text != b''

        # Copies carry metadata, so compare against the stripped text.
        if self.renamed(node):
            t2 = self.read(node)
            return t2 != text

        return True
370 370
371 371 def size(self, rev):
372 372 validaterev(rev)
373 373
374 374 node = self._indexbyrev[rev][b'node']
375 375
376 376 if self.renamed(node):
377 377 return len(self.read(node))
378 378
379 379 if self.iscensored(rev):
380 380 return 0
381 381
382 382 return len(self.revision(node))
383 383
    def iscensored(self, rev):
        """Whether ``rev`` is censored (truthy flag bit, not a bool)."""
        validaterev(rev)

        return self.flags(rev) & revlog.REVIDX_ISCENSORED
388 388
389 389 def commonancestorsheads(self, a, b):
390 390 validatenode(a)
391 391 validatenode(b)
392 392
393 393 a = self.rev(a)
394 394 b = self.rev(b)
395 395
396 396 ancestors = ancestor.commonancestorsheads(self.parentrevs, a, b)
397 397 return pycompat.maplist(self.node, ancestors)
398 398
    def descendants(self, revs):
        """Yield revision numbers of all descendants of ``revs``.

        This is a copy of revlog.descendants().
        """
        first = min(revs)
        if first == nullrev:
            # Everything descends from the null revision.
            for i in self:
                yield i
            return

        seen = set(revs)
        for i in self.revs(start=first + 1):
            for x in self.parentrevs(i):
                if x != nullrev and x in seen:
                    seen.add(i)
                    yield i
                    break
414 414
415 415 # Required by verify.
416 416 def files(self):
417 417 entries = self._svfs.listdir(self._storepath)
418 418
419 419 # Strip out undo.backup.* files created as part of transaction
420 420 # recording.
421 421 entries = [f for f in entries if not f.startswith('undo.backup.')]
422 422
423 423 return [b'/'.join((self._storepath, f)) for f in entries]
424 424
    def add(self, text, meta, transaction, linkrev, p1, p2):
        """Add a revision, packing copy ``meta`` into the text if needed."""
        if meta or text.startswith(b'\1\n'):
            text = revlog.packmeta(meta, text)

        return self.addrevision(text, transaction, linkrev, p1, p2)
430 430
431 431 def addrevision(self, text, transaction, linkrev, p1, p2, node=None,
432 432 flags=revlog.REVIDX_DEFAULT_FLAGS, cachedelta=None):
433 433 validatenode(p1)
434 434 validatenode(p2)
435 435
436 436 if flags:
437 437 node = node or revlog.hash(text, p1, p2)
438 438
439 439 rawtext, validatehash = self._processflags(text, flags, 'write')
440 440
441 441 node = node or revlog.hash(text, p1, p2)
442 442
443 443 if node in self._indexbynode:
444 444 return node
445 445
446 446 if validatehash:
447 447 self.checkhash(rawtext, node, p1=p1, p2=p2)
448 448
449 449 return self._addrawrevision(node, rawtext, transaction, linkrev, p1, p2,
450 450 flags)
451 451
    def _addrawrevision(self, node, rawtext, transaction, link, p1, p2, flags):
        """Write ``rawtext`` and record its index entry; return ``node``.

        Registers the index file with the transaction for rollback, then
        persists the fulltext and the updated index.
        """
        transaction.addbackup(self._indexpath)

        path = b'/'.join([self._storepath, hex(node)])

        self._svfs.write(path, rawtext)

        self._indexdata.append({
            b'node': node,
            b'p1': p1,
            b'p2': p2,
            b'linkrev': link,
            b'flags': flags,
        })

        self._reflectindexupdate()

        return node
470 470
    def _reflectindexupdate(self):
        """Rebuild in-memory lookups and persist ``_indexdata`` to disk."""
        self._refreshindex()
        self._svfs.write(self._indexpath, cbor.dumps(self._indexdata))
474 474
    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
        """Apply a group of incoming deltas (e.g. from a changegroup).

        ``linkmapper`` maps link nodes to changelog revisions.
        ``addrevisioncb``, when given, is invoked for every node seen
        (including ones already present). Returns the list of nodes.
        """
        nodes = []

        transaction.addbackup(self._indexpath)

        for node, p1, p2, linknode, deltabase, delta, flags in deltas:
            linkrev = linkmapper(linknode)
            flags = flags or revlog.REVIDX_DEFAULT_FLAGS

            nodes.append(node)

            if node in self._indexbynode:
                continue

            # Need to resolve the fulltext from the delta base.
            if deltabase == nullid:
                text = mdiff.patch(b'', delta)
            else:
                text = mdiff.patch(self.revision(deltabase), delta)

            self._addrawrevision(node, text, transaction, linkrev, p1, p2,
                                 flags)

            if addrevisioncb:
                addrevisioncb(self, node)

        return nodes
502 502
503 503 def revdiff(self, rev1, rev2):
504 504 validaterev(rev1)
505 505 validaterev(rev2)
506 506
507 507 node1 = self.node(rev1)
508 508 node2 = self.node(rev2)
509 509
510 510 return mdiff.textdiff(self.revision(node1, raw=True),
511 511 self.revision(node2, raw=True))
512 512
    def emitrevisiondeltas(self, requests):
        """Yield simplestorerevisiondelta objects for ``requests``.

        For each request, picks a delta base (the requested one, or p1 by
        default), falling back to a fulltext when no safe delta exists or
        when censorship is involved.
        """
        for request in requests:
            node = request.node
            rev = self.rev(node)

            if request.basenode == nullid:
                baserev = nullrev
            elif request.basenode is not None:
                baserev = self.rev(request.basenode)
            else:
                # This is a test extension and we can do simple things
                # for choosing a delta parent.
                baserev = self.deltaparent(rev)

            if baserev != nullrev and not self._candelta(baserev, rev):
                baserev = nullrev

            revision = None
            delta = None
            baserevisionsize = None

            if self.iscensored(baserev) or self.iscensored(rev):
                # Censored revisions are shipped as fulltexts (or their
                # tombstone), never as deltas.
                try:
                    revision = self.revision(node, raw=True)
                except error.CensoredNodeError as e:
                    revision = e.tombstone

                if baserev != nullrev:
                    baserevisionsize = self.rawsize(baserev)

            elif baserev == nullrev:
                revision = self.revision(node, raw=True)
            else:
                delta = self.revdiff(baserev, rev)

            extraflags = revlog.REVIDX_ELLIPSIS if request.ellipsis else 0

            yield simplestorerevisiondelta(
                node=node,
                p1node=request.p1node,
                p2node=request.p2node,
                linknode=request.linknode,
                basenode=self.node(baserev),
                flags=self.flags(rev) | extraflags,
                baserevisionsize=baserevisionsize,
                revision=revision,
                delta=delta)
560 560
561 561 def heads(self, start=None, stop=None):
562 562 # This is copied from revlog.py.
563 563 if start is None and stop is None:
564 564 if not len(self):
565 565 return [nullid]
566 566 return [self.node(r) for r in self.headrevs()]
567 567
568 568 if start is None:
569 569 start = nullid
570 570 if stop is None:
571 571 stop = []
572 572 stoprevs = set([self.rev(n) for n in stop])
573 573 startrev = self.rev(start)
574 574 reachable = {startrev}
575 575 heads = {startrev}
576 576
577 577 parentrevs = self.parentrevs
578 578 for r in self.revs(start=startrev + 1):
579 579 for p in parentrevs(r):
580 580 if p in reachable:
581 581 if r not in stoprevs:
582 582 reachable.add(r)
583 583 heads.add(r)
584 584 if p in heads and p not in stoprevs:
585 585 heads.remove(p)
586 586
587 587 return [self.node(r) for r in heads]
588 588
    def children(self, node):
        """Return the nodes of all direct children of ``node``.

        This is a copy of revlog.children().
        """
        validatenode(node)

        c = []
        p = self.rev(node)
        for r in self.revs(start=p + 1):
            prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
            if prevs:
                for pr in prevs:
                    if pr == p:
                        c.append(self.node(r))
            elif p == nullrev:
                # Parentless revisions are children of the null revision.
                c.append(self.node(r))
        return c
604 604
    def getstrippoint(self, minlink):
        """Return ``(strippoint, brokenrevs)`` for stripping ``minlink``.

        ``strippoint`` is the lowest revision that must be removed so no
        remaining revision links to a changelog rev >= ``minlink``;
        ``brokenrevs`` are revisions below it that nonetheless link too
        low. This is largely a copy of revlog.getstrippoint().
        """
        brokenrevs = set()
        strippoint = len(self)

        heads = {}
        futurelargelinkrevs = set()
        for head in self.heads():
            headlinkrev = self.linkrev(self.rev(head))
            heads[head] = headlinkrev
            if headlinkrev >= minlink:
                futurelargelinkrevs.add(headlinkrev)

        # This algorithm involves walking down the rev graph, starting at the
        # heads. Since the revs are topologically sorted according to linkrev,
        # once all head linkrevs are below the minlink, we know there are
        # no more revs that could have a linkrev greater than minlink.
        # So we can stop walking.
        while futurelargelinkrevs:
            strippoint -= 1
            linkrev = heads.pop(strippoint)

            if linkrev < minlink:
                brokenrevs.add(strippoint)
            else:
                futurelargelinkrevs.remove(linkrev)

            for p in self.parentrevs(strippoint):
                if p != nullrev:
                    plinkrev = self.linkrev(p)
                    heads[p] = plinkrev
                    if plinkrev >= minlink:
                        futurelargelinkrevs.add(plinkrev)

        return strippoint, brokenrevs
641 641
    def strip(self, minlink, transaction):
        """Remove revisions linking to changelog revs >= ``minlink``.

        No-op when the store is empty or nothing needs stripping. Note:
        only index entries are purged; fulltext files are left behind.
        """
        if not len(self):
            return

        rev, _ignored = self.getstrippoint(minlink)
        if rev == len(self):
            return

        # Purge index data starting at the requested revision.
        self._indexdata[rev:] = []
        self._reflectindexupdate()
653 653
def issimplestorefile(f, kind, st):
    """Walk filter: True for regular store files owned by the simple store."""
    if kind != stat.S_IFREG:
        return False

    # Revlog files are handled by the regular store machinery.
    if store.isrevlog(f, kind, st):
        return False

    # Ignore transaction undo files; any other regular file is assumed
    # to belong to the simple store.
    return not f.startswith('undo.')
667 667
class simplestore(store.encodedstore):
    """encodedstore that also reports the simple store's data files."""

    def datafiles(self):
        # First everything the base store knows about...
        for x in super(simplestore, self).datafiles():
            yield x

        # Supplement with non-revlog files.
        extrafiles = self._walk('data', True, filefilter=issimplestorefile)

        for unencoded, encoded, size in extrafiles:
            try:
                unencoded = store.decodefilename(unencoded)
            except KeyError:
                unencoded = None

            yield unencoded, encoded, size
683 683
def reposetup(ui, repo):
    """Swap the repository's file() storage for filestorage instances.

    Only applies to local repositories; bundle repositories are refused.
    """
    if not repo.local():
        return

    if isinstance(repo, bundlerepo.bundlerepository):
        raise error.Abort(_('cannot use simple store with bundlerepo'))

    class simplestorerepo(repo.__class__):
        def file(self, f):
            # Route per-file storage through the simple store.
            return filestorage(self.svfs, f)

    repo.__class__ = simplestorerepo
696 696
def featuresetup(ui, supported):
    """localrepo feature-setup hook: declare our requirement supported."""
    supported.add(REQUIREMENT)
699 699
def newreporequirements(orig, ui):
    """Modifies default requirements for new repos to use the simple store.

    Wraps localrepo.newreporequirements; adds REQUIREMENT to whatever the
    original returns.
    """
    requirements = orig(ui)

    # These requirements are only used to affect creation of the store
    # object. We have our own store. So we can remove them.
    # TODO do this once we feel like taking the test hit.
    #if 'fncache' in requirements:
    #    requirements.remove('fncache')
    #if 'dotencode' in requirements:
    #    requirements.remove('dotencode')

    requirements.add(REQUIREMENT)

    return requirements
715 715
def makestore(orig, requirements, path, vfstype):
    """store.store wrapper: return a simplestore when the repo opts in."""
    if REQUIREMENT in requirements:
        return simplestore(path, vfstype)

    return orig(requirements, path, vfstype)
721 721
def verifierinit(orig, self, *args, **kwargs):
    """verify.verifier.__init__ wrapper tuning verification for this store."""
    orig(self, *args, **kwargs)

    # We don't care that files in the store don't align with what is
    # advertised. So suppress these warnings.
    self.warnorphanstorefiles = False
728 728
def extsetup(ui):
    """Install the extension's hooks and monkeypatches at load time."""
    localrepo.featuresetupfuncs.add(featuresetup)

    extensions.wrapfunction(localrepo, 'newreporequirements',
                            newreporequirements)
    extensions.wrapfunction(store, 'store', makestore)
    extensions.wrapfunction(verify.verifier, '__init__', verifierinit)
General Comments 0
You need to be logged in to leave comments. Login now