filelog: remove version attribute (API)...
Gregory Szorc
r39879:68282a7b default
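
The change removes the ``version`` passthrough from ``filelog.__init__`` and the matching ``version`` attribute from the ``ifilestorage`` interface. A minimal before/after sketch in Python (names taken from the diff below; how verify obtains the version after this change is not shown here):

    # Before: filelog re-exported the revlog format version for verify.
    class filelog(object):
        def __init__(self, opener, path):
            self._revlog = revlog.revlog(opener,
                                         '/'.join(('data', path + '.i')),
                                         censorable=True)
            # Used by verify.
            self.version = self._revlog.version   # removed by this changeset

    # After: the attribute is gone from both filelog and ifilestorage; the
    # revlog version is treated as a storage implementation detail.
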
@@ -1,277 +1,275 @@ mercurial/filelog.py
1 1 # filelog.py - file history class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from . import (
11 11 error,
12 12 repository,
13 13 revlog,
14 14 )
15 15 from .utils import (
16 16 interfaceutil,
17 17 )
18 18
19 19 @interfaceutil.implementer(repository.ifilestorage)
20 20 class filelog(object):
21 21 def __init__(self, opener, path):
22 22 self._revlog = revlog.revlog(opener,
23 23 '/'.join(('data', path + '.i')),
24 24 censorable=True)
25 25 # Full name of the user visible file, relative to the repository root.
26 26 # Used by LFS.
27 27 self.filename = path
28 28 # Used by repo upgrade.
29 29 self.index = self._revlog.index
30 # Used by verify.
31 self.version = self._revlog.version
32 30 # Used by changegroup generation.
33 31 self._generaldelta = self._revlog._generaldelta
34 32
35 33 def __len__(self):
36 34 return len(self._revlog)
37 35
38 36 def __iter__(self):
39 37 return self._revlog.__iter__()
40 38
41 39 def revs(self, start=0, stop=None):
42 40 return self._revlog.revs(start=start, stop=stop)
43 41
44 42 def parents(self, node):
45 43 return self._revlog.parents(node)
46 44
47 45 def parentrevs(self, rev):
48 46 return self._revlog.parentrevs(rev)
49 47
50 48 def rev(self, node):
51 49 return self._revlog.rev(node)
52 50
53 51 def node(self, rev):
54 52 return self._revlog.node(rev)
55 53
56 54 def lookup(self, node):
57 55 return self._revlog.lookup(node)
58 56
59 57 def linkrev(self, rev):
60 58 return self._revlog.linkrev(rev)
61 59
62 60 # Used by LFS, verify.
63 61 def flags(self, rev):
64 62 return self._revlog.flags(rev)
65 63
66 64 def commonancestorsheads(self, node1, node2):
67 65 return self._revlog.commonancestorsheads(node1, node2)
68 66
69 67 # Used by dagop.blockdescendants().
70 68 def descendants(self, revs):
71 69 return self._revlog.descendants(revs)
72 70
73 71 def heads(self, start=None, stop=None):
74 72 return self._revlog.heads(start, stop)
75 73
76 74 # Used by hgweb, children extension.
77 75 def children(self, node):
78 76 return self._revlog.children(node)
79 77
80 78 def deltaparent(self, rev):
81 79 return self._revlog.deltaparent(rev)
82 80
83 81 def iscensored(self, rev):
84 82 return self._revlog.iscensored(rev)
85 83
86 84 # Used by verify.
87 85 def rawsize(self, rev):
88 86 return self._revlog.rawsize(rev)
89 87
90 88 # Might be unused.
91 89 def checkhash(self, text, node, p1=None, p2=None, rev=None):
92 90 return self._revlog.checkhash(text, node, p1=p1, p2=p2, rev=rev)
93 91
94 92 def revision(self, node, _df=None, raw=False):
95 93 return self._revlog.revision(node, _df=_df, raw=raw)
96 94
97 95 def revdiff(self, rev1, rev2):
98 96 return self._revlog.revdiff(rev1, rev2)
99 97
100 98 def emitrevisiondeltas(self, requests):
101 99 return self._revlog.emitrevisiondeltas(requests)
102 100
103 101 def addrevision(self, revisiondata, transaction, linkrev, p1, p2,
104 102 node=None, flags=revlog.REVIDX_DEFAULT_FLAGS,
105 103 cachedelta=None):
106 104 return self._revlog.addrevision(revisiondata, transaction, linkrev,
107 105 p1, p2, node=node, flags=flags,
108 106 cachedelta=cachedelta)
109 107
110 108 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
111 109 return self._revlog.addgroup(deltas, linkmapper, transaction,
112 110 addrevisioncb=addrevisioncb)
113 111
114 112 def getstrippoint(self, minlink):
115 113 return self._revlog.getstrippoint(minlink)
116 114
117 115 def strip(self, minlink, transaction):
118 116 return self._revlog.strip(minlink, transaction)
119 117
120 118 def censorrevision(self, tr, node, tombstone=b''):
121 119 return self._revlog.censorrevision(node, tombstone=tombstone)
122 120
123 121 def files(self):
124 122 return self._revlog.files()
125 123
126 124 # Used by verify.
127 125 def checksize(self):
128 126 return self._revlog.checksize()
129 127
130 128 def read(self, node):
131 129 t = self.revision(node)
132 130 if not t.startswith('\1\n'):
133 131 return t
134 132 s = t.index('\1\n', 2)
135 133 return t[s + 2:]
136 134
137 135 def add(self, text, meta, transaction, link, p1=None, p2=None):
138 136 if meta or text.startswith('\1\n'):
139 137 text = revlog.packmeta(meta, text)
140 138 return self.addrevision(text, transaction, link, p1, p2)
141 139
142 140 def renamed(self, node):
143 141 if self.parents(node)[0] != revlog.nullid:
144 142 return False
145 143 t = self.revision(node)
146 144 m = revlog.parsemeta(t)[0]
147 145 # copy and copyrev occur in pairs. In rare cases due to bugs,
148 146 # one can occur without the other.
149 147 if m and "copy" in m and "copyrev" in m:
150 148 return (m["copy"], revlog.bin(m["copyrev"]))
151 149 return False
152 150
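
For context on the ``\1\n`` handling in ``read()``, ``add()``, and ``renamed()`` above: copy metadata is framed between two ``\1\n`` markers ahead of the file text. A hypothetical illustration using the same ``revlog.packmeta``/``revlog.parsemeta`` helpers the code calls (the metadata values are invented):

    meta = {'copy': 'old/path', 'copyrev': '0' * 40}
    stored = revlog.packmeta(meta, 'file contents\n')
    # 'stored' begins with '\1\ncopy: old/path\ncopyrev: 00...0\n\1\n' followed
    # by the file contents; read() strips that header, revision() keeps it.
    parsedmeta, offset = revlog.parsemeta(stored)
    assert parsedmeta['copy'] == 'old/path'
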
153 151 def size(self, rev):
154 152 """return the size of a given revision"""
155 153
156 154 # for revisions with renames, we have to go the slow way
157 155 node = self.node(rev)
158 156 if self.renamed(node):
159 157 return len(self.read(node))
160 158 if self.iscensored(rev):
161 159 return 0
162 160
163 161 # XXX if self.read(node).startswith("\1\n"), this returns (size+4)
164 162 return self._revlog.size(rev)
165 163
166 164 def cmp(self, node, text):
167 165 """compare text with a given file revision
168 166
169 167 returns True if text is different than what is stored.
170 168 """
171 169
172 170 t = text
173 171 if text.startswith('\1\n'):
174 172 t = '\1\n\1\n' + text
175 173
176 174 samehashes = not self._revlog.cmp(node, t)
177 175 if samehashes:
178 176 return False
179 177
180 178 # censored files compare against the empty file
181 179 if self.iscensored(self.rev(node)):
182 180 return text != ''
183 181
184 182 # renaming a file produces a different hash, even if the data
185 183 # remains unchanged. Check if it's the case (slow):
186 184 if self.renamed(node):
187 185 t2 = self.read(node)
188 186 return t2 != text
189 187
190 188 return True
191 189
192 190 def verifyintegrity(self, state):
193 191 return self._revlog.verifyintegrity(state)
194 192
195 193 # TODO these aren't part of the interface and aren't internal methods.
196 194 # Callers should be fixed to not use them.
197 195
198 196 # Used by LFS.
199 197 @property
200 198 def filename(self):
201 199 return self._revlog.filename
202 200
203 201 @filename.setter
204 202 def filename(self, value):
205 203 self._revlog.filename = value
206 204
207 205 # Used by bundlefilelog, unionfilelog.
208 206 @property
209 207 def indexfile(self):
210 208 return self._revlog.indexfile
211 209
212 210 @indexfile.setter
213 211 def indexfile(self, value):
214 212 self._revlog.indexfile = value
215 213
216 214 # Used by LFS, repo upgrade.
217 215 @property
218 216 def opener(self):
219 217 return self._revlog.opener
220 218
221 219 # Used by repo upgrade.
222 220 def clone(self, tr, destrevlog, **kwargs):
223 221 if not isinstance(destrevlog, filelog):
224 222 raise error.ProgrammingError('expected filelog to clone()')
225 223
226 224 return self._revlog.clone(tr, destrevlog._revlog, **kwargs)
227 225
228 226 class narrowfilelog(filelog):
229 227 """Filelog variation to be used with narrow stores."""
230 228
231 229 def __init__(self, opener, path, narrowmatch):
232 230 super(narrowfilelog, self).__init__(opener, path)
233 231 self._narrowmatch = narrowmatch
234 232
235 233 def renamed(self, node):
236 234 res = super(narrowfilelog, self).renamed(node)
237 235
238 236 # Renames that come from outside the narrowspec are problematic
239 237 # because we may lack the base text for the rename. This can result
240 238 # in code attempting to walk the ancestry or compute a diff
241 239 # encountering a missing revision. We address this by silently
242 240 # removing rename metadata if the source file is outside the
243 241 # narrow spec.
244 242 #
245 243 # A better solution would be to see if the base revision is available,
246 244 # rather than assuming it isn't.
247 245 #
248 246 # An even better solution would be to teach all consumers of rename
249 247 # metadata that the base revision may not be available.
250 248 #
251 249 # TODO consider better ways of doing this.
252 250 if res and not self._narrowmatch(res[0]):
253 251 return None
254 252
255 253 return res
256 254
257 255 def size(self, rev):
258 256 # Because we have a custom renamed() that may lie, we need to call
259 257 # the base renamed() to report accurate results.
260 258 node = self.node(rev)
261 259 if super(narrowfilelog, self).renamed(node):
262 260 return len(self.read(node))
263 261 else:
264 262 return super(narrowfilelog, self).size(rev)
265 263
266 264 def cmp(self, node, text):
267 265 different = super(narrowfilelog, self).cmp(node, text)
268 266
269 267 # Because renamed() may lie, we may get false positives for
270 268 # different content. Check for this by comparing against the original
271 269 # renamed() implementation.
272 270 if different:
273 271 if super(narrowfilelog, self).renamed(node):
274 272 t2 = self.read(node)
275 273 return t2 != text
276 274
277 275 return different
@@ -1,1619 +1,1613 @@ mercurial/repository.py
1 1 # repository.py - Interfaces and base classes for repositories and peers.
2 2 #
3 3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from .i18n import _
11 11 from . import (
12 12 error,
13 13 )
14 14 from .utils import (
15 15 interfaceutil,
16 16 )
17 17
18 18 # When narrowing is finalized and no longer subject to format changes,
19 19 # we should move this to just "narrow" or similar.
20 20 NARROW_REQUIREMENT = 'narrowhg-experimental'
21 21
22 22 class ipeerconnection(interfaceutil.Interface):
23 23 """Represents a "connection" to a repository.
24 24
25 25 This is the base interface for representing a connection to a repository.
26 26 It holds basic properties and methods applicable to all peer types.
27 27
28 28 This is not a complete interface definition and should not be used
29 29 outside of this module.
30 30 """
31 31 ui = interfaceutil.Attribute("""ui.ui instance""")
32 32
33 33 def url():
34 34 """Returns a URL string representing this peer.
35 35
36 36 Currently, implementations expose the raw URL used to construct the
37 37 instance. It may contain credentials as part of the URL. The
38 38 expectations of the value aren't well-defined and this could lead to
39 39 data leakage.
40 40
41 41 TODO audit/clean consumers and more clearly define the contents of this
42 42 value.
43 43 """
44 44
45 45 def local():
46 46 """Returns a local repository instance.
47 47
48 48 If the peer represents a local repository, returns an object that
49 49 can be used to interface with it. Otherwise returns ``None``.
50 50 """
51 51
52 52 def peer():
53 53 """Returns an object conforming to this interface.
54 54
55 55 Most implementations will ``return self``.
56 56 """
57 57
58 58 def canpush():
59 59 """Returns a boolean indicating if this peer can be pushed to."""
60 60
61 61 def close():
62 62 """Close the connection to this peer.
63 63
64 64 This is called when the peer will no longer be used. Resources
65 65 associated with the peer should be cleaned up.
66 66 """
67 67
68 68 class ipeercapabilities(interfaceutil.Interface):
69 69 """Peer sub-interface related to capabilities."""
70 70
71 71 def capable(name):
72 72 """Determine support for a named capability.
73 73
74 74 Returns ``False`` if capability not supported.
75 75
76 76 Returns ``True`` if boolean capability is supported. Returns a string
77 77 if capability support is non-boolean.
78 78
79 79 Capability strings may or may not map to wire protocol capabilities.
80 80 """
81 81
82 82 def requirecap(name, purpose):
83 83 """Require a capability to be present.
84 84
85 85 Raises a ``CapabilityError`` if the capability isn't present.
86 86 """
87 87
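
As a usage note on ``capable()`` above: it returns ``False``, ``True``, or the string value of a ``name=value`` capability, so callers have three cases to handle. A hypothetical sketch (``somecap`` is an invented capability name):

    cap = peer.capable('somecap')
    if cap is False:
        pass                 # capability absent
    elif cap is True:
        pass                 # boolean capability present
    else:
        value = cap          # string payload from a 'somecap=value' entry
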
88 88 class ipeercommands(interfaceutil.Interface):
89 89 """Client-side interface for communicating over the wire protocol.
90 90
91 91 This interface is used as a gateway to the Mercurial wire protocol.
92 92 Methods commonly call wire protocol commands of the same name.
93 93 """
94 94
95 95 def branchmap():
96 96 """Obtain heads in named branches.
97 97
98 98 Returns a dict mapping branch name to an iterable of nodes that are
99 99 heads on that branch.
100 100 """
101 101
102 102 def capabilities():
103 103 """Obtain capabilities of the peer.
104 104
105 105 Returns a set of string capabilities.
106 106 """
107 107
108 108 def clonebundles():
109 109 """Obtains the clone bundles manifest for the repo.
110 110
111 111 Returns the manifest as unparsed bytes.
112 112 """
113 113
114 114 def debugwireargs(one, two, three=None, four=None, five=None):
115 115 """Used to facilitate debugging of arguments passed over the wire."""
116 116
117 117 def getbundle(source, **kwargs):
118 118 """Obtain remote repository data as a bundle.
119 119
120 120 This command is how the bulk of repository data is transferred from
121 121 the peer to the local repository.
122 122
123 123 Returns a generator of bundle data.
124 124 """
125 125
126 126 def heads():
127 127 """Determine all known head revisions in the peer.
128 128
129 129 Returns an iterable of binary nodes.
130 130 """
131 131
132 132 def known(nodes):
133 133 """Determine whether multiple nodes are known.
134 134
135 135 Accepts an iterable of nodes whose presence to check for.
136 136
137 137 Returns an iterable of booleans indicating whether the corresponding node
138 138 at that index is known to the peer.
139 139 """
140 140
141 141 def listkeys(namespace):
142 142 """Obtain all keys in a pushkey namespace.
143 143
144 144 Returns an iterable of key names.
145 145 """
146 146
147 147 def lookup(key):
148 148 """Resolve a value to a known revision.
149 149
150 150 Returns a binary node of the resolved revision on success.
151 151 """
152 152
153 153 def pushkey(namespace, key, old, new):
154 154 """Set a value using the ``pushkey`` protocol.
155 155
156 156 Arguments correspond to the pushkey namespace and key to operate on and
157 157 the old and new values for that key.
158 158
159 159 Returns a string with the peer result. The value inside varies by the
160 160 namespace.
161 161 """
162 162
163 163 def stream_out():
164 164 """Obtain streaming clone data.
165 165
166 166 Successful result should be a generator of data chunks.
167 167 """
168 168
169 169 def unbundle(bundle, heads, url):
170 170 """Transfer repository data to the peer.
171 171
172 172 This is how the bulk of data during a push is transferred.
173 173
174 174 Returns the integer number of heads added to the peer.
175 175 """
176 176
177 177 class ipeerlegacycommands(interfaceutil.Interface):
178 178 """Interface for implementing support for legacy wire protocol commands.
179 179
180 180 Wire protocol commands transition to legacy status when they are no longer
181 181 used by modern clients. To facilitate identifying which commands are
182 182 legacy, the interfaces are split.
183 183 """
184 184
185 185 def between(pairs):
186 186 """Obtain nodes between pairs of nodes.
187 187
188 188 ``pairs`` is an iterable of node pairs.
189 189
190 190 Returns an iterable of iterables of nodes corresponding to each
191 191 requested pair.
192 192 """
193 193
194 194 def branches(nodes):
195 195 """Obtain ancestor changesets of specific nodes back to a branch point.
196 196
197 197 For each requested node, the peer finds the first ancestor node that is
198 198 a DAG root or is a merge.
199 199
200 200 Returns an iterable of iterables with the resolved values for each node.
201 201 """
202 202
203 203 def changegroup(nodes, source):
204 204 """Obtain a changegroup with data for descendants of specified nodes."""
205 205
206 206 def changegroupsubset(bases, heads, source):
207 207 pass
208 208
209 209 class ipeercommandexecutor(interfaceutil.Interface):
210 210 """Represents a mechanism to execute remote commands.
211 211
212 212 This is the primary interface for requesting that wire protocol commands
213 213 be executed. Instances of this interface are active in a context manager
214 214 and have a well-defined lifetime. When the context manager exits, all
215 215 outstanding requests are waited on.
216 216 """
217 217
218 218 def callcommand(name, args):
219 219 """Request that a named command be executed.
220 220
221 221 Receives the command name and a dictionary of command arguments.
222 222
223 223 Returns a ``concurrent.futures.Future`` that will resolve to the
224 224 result of that command request. That exact value is left up to
225 225 the implementation and possibly varies by command.
226 226
227 227 Not all commands can coexist with other commands in an executor
228 228 instance: it depends on the underlying wire protocol transport being
229 229 used and the command itself.
230 230
231 231 Implementations MAY call ``sendcommands()`` automatically if the
232 232 requested command can not coexist with other commands in this executor.
233 233
234 234 Implementations MAY call ``sendcommands()`` automatically when the
235 235 future's ``result()`` is called. So, consumers using multiple
236 236 commands with an executor MUST ensure that ``result()`` is not called
237 237 until all command requests have been issued.
238 238 """
239 239
240 240 def sendcommands():
241 241 """Trigger submission of queued command requests.
242 242
243 243 Not all transports submit commands as soon as they are requested to
244 244 run. When called, this method forces queued command requests to be
245 245 issued. It will no-op if all commands have already been sent.
246 246
247 247 When called, no more new commands may be issued with this executor.
248 248 """
249 249
250 250 def close():
251 251 """Signal that this command request is finished.
252 252
253 253 When called, no more new commands may be issued. All outstanding
254 254 commands that have previously been issued are waited on before
255 255 returning. This not only includes waiting for the futures to resolve,
256 256 but also waiting for all response data to arrive. In other words,
257 257 calling this waits for all on-wire state for issued command requests
258 258 to finish.
259 259
260 260 When used as a context manager, this method is called when exiting the
261 261 context manager.
262 262
263 263 This method may call ``sendcommands()`` if there are buffered commands.
264 264 """
265 265
266 266 class ipeerrequests(interfaceutil.Interface):
267 267 """Interface for executing commands on a peer."""
268 268
269 269 def commandexecutor():
270 270 """A context manager that resolves to an ipeercommandexecutor.
271 271
272 272 The object this resolves to can be used to issue command requests
273 273 to the peer.
274 274
275 275 Callers should call its ``callcommand`` method to issue command
276 276 requests.
277 277
278 278 A new executor should be obtained for each distinct set of commands
279 279 (possibly just a single command) that the consumer wants to execute
280 280 as part of a single operation or round trip. This is because some
281 281 peers are half-duplex and/or don't support persistent connections.
282 282 e.g. in the case of HTTP peers, commands sent to an executor represent
283 283 a single HTTP request. While some peers may support multiple command
284 284 sends over the wire per executor, consumers need to code to the least
285 285 capable peer. So it should be assumed that command executors buffer
286 286 called commands until they are told to send them and that each
287 287 command executor could result in a new connection or wire-level request
288 288 being issued.
289 289 """
290 290
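
Tying the executor contract together, a hypothetical caller could look like this (sketch only; ``heads`` and ``known`` are example commands, ``somenode`` is an invented placeholder, and no ``result()`` is called until every command has been issued, as ``callcommand()`` requires):

    with peer.commandexecutor() as e:
        fheads = e.callcommand('heads', {})
        fknown = e.callcommand('known', {'nodes': [somenode]})

    # Exiting the context manager waits for outstanding requests; the futures
    # then resolve to command-specific values.
    heads = fheads.result()
    known = fknown.result()
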
291 291 class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
292 292 """Unified interface for peer repositories.
293 293
294 294 All peer instances must conform to this interface.
295 295 """
296 296
297 297 @interfaceutil.implementer(ipeerbase)
298 298 class peer(object):
299 299 """Base class for peer repositories."""
300 300
301 301 def capable(self, name):
302 302 caps = self.capabilities()
303 303 if name in caps:
304 304 return True
305 305
306 306 name = '%s=' % name
307 307 for cap in caps:
308 308 if cap.startswith(name):
309 309 return cap[len(name):]
310 310
311 311 return False
312 312
313 313 def requirecap(self, name, purpose):
314 314 if self.capable(name):
315 315 return
316 316
317 317 raise error.CapabilityError(
318 318 _('cannot %s; remote repository does not support the %r '
319 319 'capability') % (purpose, name))
320 320
321 321 class iverifyproblem(interfaceutil.Interface):
322 322 """Represents a problem with the integrity of the repository.
323 323
324 324 Instances of this interface are emitted to describe an integrity issue
325 325 with a repository (e.g. corrupt storage, missing data, etc).
326 326
327 327 Instances are essentially messages associated with severity.
328 328 """
329 329 warning = interfaceutil.Attribute(
330 330 """Message indicating a non-fatal problem.""")
331 331
332 332 error = interfaceutil.Attribute(
333 333 """Message indicating a fatal problem.""")
334 334
335 335 class irevisiondelta(interfaceutil.Interface):
336 336 """Represents a delta between one revision and another.
337 337
338 338 Instances convey enough information to allow a revision to be exchanged
339 339 with another repository.
340 340
341 341 Instances represent the fulltext revision data or a delta against
342 342 another revision. Therefore the ``revision`` and ``delta`` attributes
343 343 are mutually exclusive.
344 344
345 345 Typically used for changegroup generation.
346 346 """
347 347
348 348 node = interfaceutil.Attribute(
349 349 """20 byte node of this revision.""")
350 350
351 351 p1node = interfaceutil.Attribute(
352 352 """20 byte node of 1st parent of this revision.""")
353 353
354 354 p2node = interfaceutil.Attribute(
355 355 """20 byte node of 2nd parent of this revision.""")
356 356
357 357 linknode = interfaceutil.Attribute(
358 358 """20 byte node of the changelog revision this node is linked to.""")
359 359
360 360 flags = interfaceutil.Attribute(
361 361 """2 bytes of integer flags that apply to this revision.""")
362 362
363 363 basenode = interfaceutil.Attribute(
364 364 """20 byte node of the revision this data is a delta against.
365 365
366 366 ``nullid`` indicates that the revision is a full revision and not
367 367 a delta.
368 368 """)
369 369
370 370 baserevisionsize = interfaceutil.Attribute(
371 371 """Size of base revision this delta is against.
372 372
373 373 May be ``None`` if ``basenode`` is ``nullid``.
374 374 """)
375 375
376 376 revision = interfaceutil.Attribute(
377 377 """Raw fulltext of revision data for this node.""")
378 378
379 379 delta = interfaceutil.Attribute(
380 380 """Delta between ``basenode`` and ``node``.
381 381
382 382 Stored in the bdiff delta format.
383 383 """)
384 384
385 385 class irevisiondeltarequest(interfaceutil.Interface):
386 386 """Represents a request to generate an ``irevisiondelta``."""
387 387
388 388 node = interfaceutil.Attribute(
389 389 """20 byte node of revision being requested.""")
390 390
391 391 p1node = interfaceutil.Attribute(
392 392 """20 byte node of 1st parent of revision.""")
393 393
394 394 p2node = interfaceutil.Attribute(
395 395 """20 byte node of 2nd parent of revision.""")
396 396
397 397 linknode = interfaceutil.Attribute(
398 398 """20 byte node to store in ``linknode`` attribute.""")
399 399
400 400 basenode = interfaceutil.Attribute(
401 401 """Base revision that delta should be generated against.
402 402
403 403 If ``nullid``, the derived ``irevisiondelta`` should have its
404 404 ``revision`` field populated and no delta should be generated.
405 405
406 406 If ``None``, the delta may be generated against any revision that
407 407 is an ancestor of this revision. Or a full revision may be used.
408 408
409 409 If any other value, the delta should be produced against that
410 410 revision.
411 411 """)
412 412
413 413 ellipsis = interfaceutil.Attribute(
414 414 """Boolean on whether the ellipsis flag should be set.""")
415 415
416 416 class ifilerevisionssequence(interfaceutil.Interface):
417 417 """Contains index data for all revisions of a file.
418 418
419 419 Types implementing this behave like lists of tuples. The index
420 420 in the list corresponds to the revision number. The values contain
421 421 index metadata.
422 422
423 423 The *null* revision (revision number -1) is always the last item
424 424 in the index.
425 425 """
426 426
427 427 def __len__():
428 428 """The total number of revisions."""
429 429
430 430 def __getitem__(rev):
431 431 """Returns the object having a specific revision number.
432 432
433 433 Returns an 8-tuple with the following fields:
434 434
435 435 offset+flags
436 436 Contains the offset and flags for the revision. 64-bit unsigned
437 437 integer where first 6 bytes are the offset and the next 2 bytes
438 438 are flags. The offset can be 0 if it is not used by the store.
439 439 compressed size
440 440 Size of the revision data in the store. It can be 0 if it isn't
441 441 needed by the store.
442 442 uncompressed size
443 443 Fulltext size. It can be 0 if it isn't needed by the store.
444 444 base revision
445 445 Revision number of revision the delta for storage is encoded
446 446 against. -1 indicates not encoded against a base revision.
447 447 link revision
448 448 Revision number of changelog revision this entry is related to.
449 449 p1 revision
450 450 Revision number of 1st parent. -1 if no 1st parent.
451 451 p2 revision
452 452 Revision number of 2nd parent. -1 if no 2nd parent.
453 453 node
454 454 Binary node value for this revision number.
455 455
456 456 Negative values should index off the end of the sequence. ``-1``
457 457 should return the null revision. ``-2`` should return the most
458 458 recent revision.
459 459 """
460 460
461 461 def __contains__(rev):
462 462 """Whether a revision number exists."""
463 463
464 464 def insert(self, i, entry):
465 465 """Add an item to the index at specific revision."""
466 466
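
As a reading aid for the 8-tuple documented in ``__getitem__`` above, a hypothetical consumer could unpack an entry like this (``index`` and ``rev`` are assumed to exist):

    (offset_flags, compsize, uncompsize, baserev,
     linkrev, p1rev, p2rev, node) = index[rev]
    offset = offset_flags >> 16     # first 6 bytes
    flags = offset_flags & 0xffff   # last 2 bytes
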
467 467 class ifileindex(interfaceutil.Interface):
468 468 """Storage interface for index data of a single file.
469 469
470 470 File storage data is divided into index metadata and data storage.
471 471 This interface defines the index portion of the interface.
472 472
473 473 The index logically consists of:
474 474
475 475 * A mapping between revision numbers and nodes.
476 476 * DAG data (storing and querying the relationship between nodes).
477 477 * Metadata to facilitate storage.
478 478 """
479 479 index = interfaceutil.Attribute(
480 480 """An ``ifilerevisionssequence`` instance.""")
481 481
482 482 def __len__():
483 483 """Obtain the number of revisions stored for this file."""
484 484
485 485 def __iter__():
486 486 """Iterate over revision numbers for this file."""
487 487
488 488 def revs(start=0, stop=None):
489 489 """Iterate over revision numbers for this file, with control."""
490 490
491 491 def parents(node):
492 492 """Returns a 2-tuple of parent nodes for a revision.
493 493
494 494 Values will be ``nullid`` if the parent is empty.
495 495 """
496 496
497 497 def parentrevs(rev):
498 498 """Like parents() but operates on revision numbers."""
499 499
500 500 def rev(node):
501 501 """Obtain the revision number given a node.
502 502
503 503 Raises ``error.LookupError`` if the node is not known.
504 504 """
505 505
506 506 def node(rev):
507 507 """Obtain the node value given a revision number.
508 508
509 509 Raises ``IndexError`` if the node is not known.
510 510 """
511 511
512 512 def lookup(node):
513 513 """Attempt to resolve a value to a node.
514 514
515 515 Value can be a binary node, hex node, revision number, or a string
516 516 that can be converted to an integer.
517 517
518 518 Raises ``error.LookupError`` if a node could not be resolved.
519 519 """
520 520
521 521 def linkrev(rev):
522 522 """Obtain the changeset revision number a revision is linked to."""
523 523
524 524 def flags(rev):
525 525 """Obtain flags used to affect storage of a revision."""
526 526
527 527 def iscensored(rev):
528 528 """Return whether a revision's content has been censored."""
529 529
530 530 def commonancestorsheads(node1, node2):
531 531 """Obtain an iterable of nodes containing heads of common ancestors.
532 532
533 533 See ``ancestor.commonancestorsheads()``.
534 534 """
535 535
536 536 def descendants(revs):
537 537 """Obtain descendant revision numbers for a set of revision numbers.
538 538
539 539 If ``nullrev`` is in the set, this is equivalent to ``revs()``.
540 540 """
541 541
542 542 def heads(start=None, stop=None):
543 543 """Obtain a list of nodes that are DAG heads, with control.
544 544
545 545 The set of revisions examined can be limited by specifying
546 546 ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
547 547 iterable of nodes. DAG traversal starts at earlier revision
548 548 ``start`` and iterates forward until any node in ``stop`` is
549 549 encountered.
550 550 """
551 551
552 552 def children(node):
553 553 """Obtain nodes that are children of a node.
554 554
555 555 Returns a list of nodes.
556 556 """
557 557
558 558 def deltaparent(rev):
559 559 """"Return the revision that is a suitable parent to delta against."""
560 560
561 561 class ifiledata(interfaceutil.Interface):
562 562 """Storage interface for data storage of a specific file.
563 563
564 564 This complements ``ifileindex`` and provides an interface for accessing
565 565 data for a tracked file.
566 566 """
567 567 def rawsize(rev):
568 568 """The size of the fulltext data for a revision as stored."""
569 569
570 570 def size(rev):
571 571 """Obtain the fulltext size of file data.
572 572
573 573 Any metadata is excluded from size measurements. Use ``rawsize()`` if
574 574 metadata size is important.
575 575 """
576 576
577 577 def checkhash(fulltext, node, p1=None, p2=None, rev=None):
578 578 """Validate the stored hash of a given fulltext and node.
579 579
580 580 Raises ``error.StorageError`` if hash validation fails.
581 581 """
582 582
583 583 def revision(node, raw=False):
584 584 """"Obtain fulltext data for a node.
585 585
586 586 By default, any storage transformations are applied before the data
587 587 is returned. If ``raw`` is True, non-raw storage transformations
588 588 are not applied.
589 589
590 590 The fulltext data may contain a header containing metadata. Most
591 591 consumers should use ``read()`` to obtain the actual file data.
592 592 """
593 593
594 594 def read(node):
595 595 """Resolve file fulltext data.
596 596
597 597 This is similar to ``revision()`` except any metadata in the data
598 598 headers is stripped.
599 599 """
600 600
601 601 def renamed(node):
602 602 """Obtain copy metadata for a node.
603 603
604 604 Returns ``False`` if no copy metadata is stored or a 2-tuple of
605 605 (path, node) from which this revision was copied.
606 606 """
607 607
608 608 def cmp(node, fulltext):
609 609 """Compare fulltext to another revision.
610 610
611 611 Returns True if the fulltext is different from what is stored.
612 612
613 613 This takes copy metadata into account.
614 614
615 615 TODO better document the copy metadata and censoring logic.
616 616 """
617 617
618 618 def revdiff(rev1, rev2):
619 619 """Obtain a delta between two revision numbers.
620 620
621 621 Operates on raw data in the store (``revision(node, raw=True)``).
622 622
623 623 The returned data is the result of ``bdiff.bdiff`` on the raw
624 624 revision data.
625 625 """
626 626
627 627 def emitrevisiondeltas(requests):
628 628 """Produce ``irevisiondelta`` from ``irevisiondeltarequest``s.
629 629
630 630 Given an iterable of objects conforming to the ``irevisiondeltarequest``
631 631 interface, emits objects conforming to the ``irevisiondelta``
632 632 interface.
633 633
634 634 This method is a generator.
635 635
636 636 ``irevisiondelta`` should be emitted in the same order of
637 637 ``irevisiondeltarequest`` that was passed in.
638 638
639 639 The emitted objects MUST conform to the results of
640 640 ``irevisiondeltarequest``. Namely, they must respect any requests
641 641 for building a delta from a specific ``basenode`` if defined.
642 642
643 643 When sending deltas, implementations must take into account whether
644 644 the client has the base delta before encoding a delta against that
645 645 revision. A revision encountered previously in ``requests`` is
646 646 always a suitable base revision. An example of a bad delta is a delta
647 647 against a non-ancestor revision. Another example of a bad delta is a
648 648 delta against a censored revision.
649 649 """
650 650
651 651 class ifilemutation(interfaceutil.Interface):
652 652 """Storage interface for mutation events of a tracked file."""
653 653
654 654 def add(filedata, meta, transaction, linkrev, p1, p2):
655 655 """Add a new revision to the store.
656 656
657 657 Takes file data, dictionary of metadata, a transaction, linkrev,
658 658 and parent nodes.
659 659
660 660 Returns the node that was added.
661 661
662 662 May no-op if a revision matching the supplied data is already stored.
663 663 """
664 664
665 665 def addrevision(revisiondata, transaction, linkrev, p1, p2, node=None,
666 666 flags=0, cachedelta=None):
667 667 """Add a new revision to the store.
668 668
669 669 This is similar to ``add()`` except it operates at a lower level.
670 670
671 671 The data passed in already contains a metadata header, if any.
672 672
673 673 ``node`` and ``flags`` can be used to define the expected node and
674 674 the flags to use with storage.
675 675
676 676 ``add()`` is usually called when adding files from e.g. the working
677 677 directory. ``addrevision()`` is often called by ``add()`` and for
678 678 scenarios where revision data has already been computed, such as when
679 679 applying raw data from a peer repo.
680 680 """
681 681
682 682 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
683 683 """Process a series of deltas for storage.
684 684
685 685 ``deltas`` is an iterable of 7-tuples of
686 686 (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
687 687 to add.
688 688
689 689 The ``delta`` field contains ``mpatch`` data to apply to a base
690 690 revision, identified by ``deltabase``. The base node can be
691 691 ``nullid``, in which case the header from the delta can be ignored
692 692 and the delta used as the fulltext.
693 693
694 694 ``addrevisioncb`` should be called for each node as it is committed.
695 695
696 696 Returns a list of nodes that were processed. A node will be in the list
697 697 even if it existed in the store previously.
698 698 """
699 699
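
For orientation, the 7-tuples consumed by ``addgroup()`` above unpack as follows (hypothetical sketch; ``deltas`` and ``linkmapper`` are as described in the docstring):

    for node, p1, p2, linknode, deltabase, delta, flags in deltas:
        linkrev = linkmapper(linknode)
        # 'delta' is mpatch data against 'deltabase'; with a nullid base the
        # delta effectively carries the fulltext, as noted above.
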
700 700 def censorrevision(tr, node, tombstone=b''):
701 701 """Remove the content of a single revision.
702 702
703 703 The specified ``node`` will have its content purged from storage.
704 704 Future attempts to access the revision data for this node will
705 705 result in failure.
706 706
707 707 A ``tombstone`` message can optionally be stored. This message may be
708 708 displayed to users when they attempt to access the missing revision
709 709 data.
710 710
711 711 Storage backends may have stored deltas against the previous content
712 712 in this revision. As part of censoring a revision, these storage
713 713 backends are expected to rewrite any internally stored deltas such
714 714 that they no longer reference the deleted content.
715 715 """
716 716
717 717 def getstrippoint(minlink):
718 718 """Find the minimum revision that must be stripped to strip a linkrev.
719 719
720 720 Returns a 2-tuple containing the minimum revision number and a set
721 721 of all revision numbers that would be broken by this strip.
722 722
723 723 TODO this is highly revlog centric and should be abstracted into
724 724 a higher-level deletion API. ``repair.strip()`` relies on this.
725 725 """
726 726
727 727 def strip(minlink, transaction):
728 728 """Remove storage of items starting at a linkrev.
729 729
730 730 This uses ``getstrippoint()`` to determine the first node to remove.
731 731 Then it effectively truncates storage for all revisions after that.
732 732
733 733 TODO this is highly revlog centric and should be abstracted into a
734 734 higher-level deletion API.
735 735 """
736 736
737 737 class ifilestorage(ifileindex, ifiledata, ifilemutation):
738 738 """Complete storage interface for a single tracked file."""
739 739
740 version = interfaceutil.Attribute(
741 """Version number of storage.
742
743 TODO this feels revlog centric and could likely be removed.
744 """)
745
746 740 _generaldelta = interfaceutil.Attribute(
747 741 """Whether deltas can be against any parent revision.
748 742
749 743 TODO this is used by changegroup code and it could probably be
750 744 folded into another API.
751 745 """)
752 746
753 747 def files():
754 748 """Obtain paths that are backing storage for this file.
755 749
756 750 TODO this is used heavily by verify code and there should probably
757 751 be a better API for that.
758 752 """
759 753
760 754 def checksize():
761 755 """Obtain the expected sizes of backing files.
762 756
763 757 TODO this is used by verify and it should not be part of the interface.
764 758 """
765 759
766 760 def verifyintegrity(state):
767 761 """Verifies the integrity of file storage.
768 762
769 763 ``state`` is a dict holding state of the verifier process. It can be
770 764 used to communicate data between invocations of multiple storage
771 765 primitives.
772 766
773 767 The method yields objects conforming to the ``iverifyproblem``
774 768 interface.
775 769 """
776 770
777 771 class idirs(interfaceutil.Interface):
778 772 """Interface representing a collection of directories from paths.
779 773
780 774 This interface is essentially a derived data structure representing
781 775 directories from a collection of paths.
782 776 """
783 777
784 778 def addpath(path):
785 779 """Add a path to the collection.
786 780
787 781 All directories in the path will be added to the collection.
788 782 """
789 783
790 784 def delpath(path):
791 785 """Remove a path from the collection.
792 786
793 787 If the removal was the last path in a particular directory, the
794 788 directory is removed from the collection.
795 789 """
796 790
797 791 def __iter__():
798 792 """Iterate over the directories in this collection of paths."""
799 793
800 794 def __contains__(path):
801 795 """Whether a specific directory is in this collection."""
802 796
803 797 class imanifestdict(interfaceutil.Interface):
804 798 """Interface representing a manifest data structure.
805 799
806 800 A manifest is effectively a dict mapping paths to entries. Each entry
807 801 consists of a binary node and extra flags affecting that entry.
808 802 """
809 803
810 804 def __getitem__(path):
811 805 """Returns the binary node value for a path in the manifest.
812 806
813 807 Raises ``KeyError`` if the path does not exist in the manifest.
814 808
815 809 Equivalent to ``self.find(path)[0]``.
816 810 """
817 811
818 812 def find(path):
819 813 """Returns the entry for a path in the manifest.
820 814
821 815 Returns a 2-tuple of (node, flags).
822 816
823 817 Raises ``KeyError`` if the path does not exist in the manifest.
824 818 """
825 819
826 820 def __len__():
827 821 """Return the number of entries in the manifest."""
828 822
829 823 def __nonzero__():
830 824 """Returns True if the manifest has entries, False otherwise."""
831 825
832 826 __bool__ = __nonzero__
833 827
834 828 def __setitem__(path, node):
835 829 """Define the node value for a path in the manifest.
836 830
837 831 If the path is already in the manifest, its flags will be copied to
838 832 the new entry.
839 833 """
840 834
841 835 def __contains__(path):
842 836 """Whether a path exists in the manifest."""
843 837
844 838 def __delitem__(path):
845 839 """Remove a path from the manifest.
846 840
847 841 Raises ``KeyError`` if the path is not in the manifest.
848 842 """
849 843
850 844 def __iter__():
851 845 """Iterate over paths in the manifest."""
852 846
853 847 def iterkeys():
854 848 """Iterate over paths in the manifest."""
855 849
856 850 def keys():
857 851 """Obtain a list of paths in the manifest."""
858 852
859 853 def filesnotin(other, match=None):
860 854 """Obtain the set of paths in this manifest but not in another.
861 855
862 856 ``match`` is an optional matcher function to be applied to both
863 857 manifests.
864 858
865 859 Returns a set of paths.
866 860 """
867 861
868 862 def dirs():
869 863 """Returns an object implementing the ``idirs`` interface."""
870 864
871 865 def hasdir(dir):
872 866 """Returns a bool indicating if a directory is in this manifest."""
873 867
874 868 def matches(match):
875 869 """Generate a new manifest filtered through a matcher.
876 870
877 871 Returns an object conforming to the ``imanifestdict`` interface.
878 872 """
879 873
880 874 def walk(match):
881 875 """Generator of paths in manifest satisfying a matcher.
882 876
883 877 This is equivalent to ``self.matches(match).iterkeys()`` except a new
884 878 manifest object is not created.
885 879
886 880 If the matcher has explicit files listed and they don't exist in
887 881 the manifest, ``match.bad()`` is called for each missing file.
888 882 """
889 883
890 884 def diff(other, match=None, clean=False):
891 885 """Find differences between this manifest and another.
892 886
893 887 This manifest is compared to ``other``.
894 888
895 889 If ``match`` is provided, the two manifests are filtered against this
896 890 matcher and only entries satisfying the matcher are compared.
897 891
898 892 If ``clean`` is True, unchanged files are included in the returned
899 893 object.
900 894
901 895 Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
902 896 the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
903 897 represents the node and flags for this manifest and ``(node2, flag2)``
904 898 are the same for the other manifest.
905 899 """
906 900
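
A hypothetical consumer of ``diff()`` as documented above (``m1`` and ``m2`` are assumed ``imanifestdict`` instances):

    d = m1.diff(m2)
    for path, ((node1, flag1), (node2, flag2)) in d.items():
        # (node1, flag1) comes from m1 and (node2, flag2) from m2; with
        # clean=True unchanged paths appear here as well.
        pass
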
907 901 def setflag(path, flag):
908 902 """Set the flag value for a given path.
909 903
910 904 Raises ``KeyError`` if the path is not already in the manifest.
911 905 """
912 906
913 907 def get(path, default=None):
914 908 """Obtain the node value for a path or a default value if missing."""
915 909
916 910 def flags(path, default=''):
917 911 """Return the flags value for a path or a default value if missing."""
918 912
919 913 def copy():
920 914 """Return a copy of this manifest."""
921 915
922 916 def items():
923 917 """Returns an iterable of (path, node) for items in this manifest."""
924 918
925 919 def iteritems():
926 920 """Identical to items()."""
927 921
928 922 def iterentries():
929 923 """Returns an iterable of (path, node, flags) for this manifest.
930 924
931 925 Similar to ``iteritems()`` except items are a 3-tuple and include
932 926 flags.
933 927 """
934 928
935 929 def text():
936 930 """Obtain the raw data representation for this manifest.
937 931
938 932 Result is used to create a manifest revision.
939 933 """
940 934
941 935 def fastdelta(base, changes):
942 936 """Obtain a delta between this manifest and another given changes.
943 937
944 938 ``base`` is the raw data representation for another manifest.
945 939
946 940 ``changes`` is an iterable of ``(path, to_delete)``.
947 941
948 942 Returns a 2-tuple containing ``bytearray(self.text())`` and the
949 943 delta between ``base`` and this manifest.
950 944 """
951 945
952 946 class imanifestrevisionbase(interfaceutil.Interface):
953 947 """Base interface representing a single revision of a manifest.
954 948
955 949 Should not be used as a primary interface: should always be inherited
956 950 as part of a larger interface.
957 951 """
958 952
959 953 def new():
960 954 """Obtain a new manifest instance.
961 955
962 956 Returns an object conforming to the ``imanifestrevisionwritable``
963 957 interface. The instance will be associated with the same
964 958 ``imanifestlog`` collection as this instance.
965 959 """
966 960
967 961 def copy():
968 962 """Obtain a copy of this manifest instance.
969 963
970 964 Returns an object conforming to the ``imanifestrevisionwritable``
971 965 interface. The instance will be associated with the same
972 966 ``imanifestlog`` collection as this instance.
973 967 """
974 968
975 969 def read():
976 970 """Obtain the parsed manifest data structure.
977 971
978 972 The returned object conforms to the ``imanifestdict`` interface.
979 973 """
980 974
981 975 class imanifestrevisionstored(imanifestrevisionbase):
982 976 """Interface representing a manifest revision committed to storage."""
983 977
984 978 def node():
985 979 """The binary node for this manifest."""
986 980
987 981 parents = interfaceutil.Attribute(
988 982 """List of binary nodes that are parents for this manifest revision."""
989 983 )
990 984
991 985 def readdelta(shallow=False):
992 986 """Obtain the manifest data structure representing changes from parent.
993 987
994 988 This manifest is compared to its 1st parent. A new manifest representing
995 989 those differences is constructed.
996 990
997 991 The returned object conforms to the ``imanifestdict`` interface.
998 992 """
999 993
1000 994 def readfast(shallow=False):
1001 995 """Calls either ``read()`` or ``readdelta()``.
1002 996
1003 997 The faster of the two options is called.
1004 998 """
1005 999
1006 1000 def find(key):
1007 1001 """Calls self.read().find(key)``.
1008 1002
1009 1003 Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
1010 1004 """
1011 1005
1012 1006 class imanifestrevisionwritable(imanifestrevisionbase):
1013 1007 """Interface representing a manifest revision that can be committed."""
1014 1008
1015 1009 def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
1016 1010 """Add this revision to storage.
1017 1011
1018 1012 Takes a transaction object, the changeset revision number it will
1019 1013 be associated with, its parent nodes, and lists of added and
1020 1014 removed paths.
1021 1015
1022 1016 If match is provided, storage can choose not to inspect or write out
1023 1017 items that do not match. Storage is still required to be able to provide
1024 1018 the full manifest in the future for any directories written (these
1025 1019 manifests should not be "narrowed on disk").
1026 1020
1027 1021 Returns the binary node of the created revision.
1028 1022 """
1029 1023
1030 1024 class imanifeststorage(interfaceutil.Interface):
1031 1025 """Storage interface for manifest data."""
1032 1026
1033 1027 tree = interfaceutil.Attribute(
1034 1028 """The path to the directory this manifest tracks.
1035 1029
1036 1030 The empty bytestring represents the root manifest.
1037 1031 """)
1038 1032
1039 1033 index = interfaceutil.Attribute(
1040 1034 """An ``ifilerevisionssequence`` instance.""")
1041 1035
1042 1036 indexfile = interfaceutil.Attribute(
1043 1037 """Path of revlog index file.
1044 1038
1045 1039 TODO this is revlog specific and should not be exposed.
1046 1040 """)
1047 1041
1048 1042 opener = interfaceutil.Attribute(
1049 1043 """VFS opener to use to access underlying files used for storage.
1050 1044
1051 1045 TODO this is revlog specific and should not be exposed.
1052 1046 """)
1053 1047
1054 1048 version = interfaceutil.Attribute(
1055 1049 """Revlog version number.
1056 1050
1057 1051 TODO this is revlog specific and should not be exposed.
1058 1052 """)
1059 1053
1060 1054 _generaldelta = interfaceutil.Attribute(
1061 1055 """Whether generaldelta storage is being used.
1062 1056
1063 1057 TODO this is revlog specific and should not be exposed.
1064 1058 """)
1065 1059
1066 1060 fulltextcache = interfaceutil.Attribute(
1067 1061 """Dict with cache of fulltexts.
1068 1062
1069 1063 TODO this doesn't feel appropriate for the storage interface.
1070 1064 """)
1071 1065
1072 1066 def __len__():
1073 1067 """Obtain the number of revisions stored for this manifest."""
1074 1068
1075 1069 def __iter__():
1076 1070 """Iterate over revision numbers for this manifest."""
1077 1071
1078 1072 def rev(node):
1079 1073 """Obtain the revision number given a binary node.
1080 1074
1081 1075 Raises ``error.LookupError`` if the node is not known.
1082 1076 """
1083 1077
1084 1078 def node(rev):
1085 1079 """Obtain the node value given a revision number.
1086 1080
1087 1081 Raises ``error.LookupError`` if the revision is not known.
1088 1082 """
1089 1083
1090 1084 def lookup(value):
1091 1085 """Attempt to resolve a value to a node.
1092 1086
1093 1087 Value can be a binary node, hex node, revision number, or a bytes
1094 1088 that can be converted to an integer.
1095 1089
1096 1090 Raises ``error.LookupError`` if a node could not be resolved.
1097 1091
1098 1092 TODO this is only used by debug* commands and can probably be deleted
1099 1093 easily.
1100 1094 """
1101 1095
1102 1096 def parents(node):
1103 1097 """Returns a 2-tuple of parent nodes for a node.
1104 1098
1105 1099 Values will be ``nullid`` if the parent is empty.
1106 1100 """
1107 1101
1108 1102 def parentrevs(rev):
1109 1103 """Like parents() but operates on revision numbers."""
1110 1104
1111 1105 def linkrev(rev):
1112 1106 """Obtain the changeset revision number a revision is linked to."""
1113 1107
1114 1108 def revision(node, _df=None, raw=False):
1115 1109 """Obtain fulltext data for a node."""
1116 1110
1117 1111 def revdiff(rev1, rev2):
1118 1112 """Obtain a delta between two revision numbers.
1119 1113
1120 1114 The returned data is the result of ``bdiff.bdiff()`` on the raw
1121 1115 revision data.
1122 1116 """
1123 1117
1124 1118 def cmp(node, fulltext):
1125 1119 """Compare fulltext to another revision.
1126 1120
1127 1121 Returns True if the fulltext is different from what is stored.
1128 1122 """
1129 1123
1130 1124 def emitrevisiondeltas(requests):
1131 1125 """Produce ``irevisiondelta`` from ``irevisiondeltarequest``s.
1132 1126
1133 1127 See the documentation for ``ifiledata`` for more.
1134 1128 """
1135 1129
1136 1130 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
1137 1131 """Process a series of deltas for storage.
1138 1132
1139 1133 See the documentation in ``ifilemutation`` for more.
1140 1134 """
1141 1135
1142 1136 def getstrippoint(minlink):
1143 1137 """Find minimum revision that must be stripped to strip a linkrev.
1144 1138
1145 1139 See the documentation in ``ifilemutation`` for more.
1146 1140 """
1147 1141
1148 1142 def strip(minlink, transaction):
1149 1143 """Remove storage of items starting at a linkrev.
1150 1144
1151 1145 See the documentation in ``ifilemutation`` for more.
1152 1146 """
1153 1147
1154 1148 def checksize():
1155 1149 """Obtain the expected sizes of backing files.
1156 1150
1157 1151 TODO this is used by verify and it should not be part of the interface.
1158 1152 """
1159 1153
1160 1154 def files():
1161 1155 """Obtain paths that are backing storage for this manifest.
1162 1156
1163 1157 TODO this is used by verify and there should probably be a better API
1164 1158 for this functionality.
1165 1159 """
1166 1160
1167 1161 def deltaparent(rev):
1168 1162 """Obtain the revision that a revision is delta'd against.
1169 1163
1170 1164 TODO delta encoding is an implementation detail of storage and should
1171 1165 not be exposed to the storage interface.
1172 1166 """
1173 1167
1174 1168 def clone(tr, dest, **kwargs):
1175 1169 """Clone this instance to another."""
1176 1170
1177 1171 def clearcaches(clear_persisted_data=False):
1178 1172 """Clear any caches associated with this instance."""
1179 1173
1180 1174 def dirlog(d):
1181 1175 """Obtain a manifest storage instance for a tree."""
1182 1176
1183 1177 def add(m, transaction, link, p1, p2, added, removed, readtree=None,
1184 1178 match=None):
1185 1179 """Add a revision to storage.
1186 1180
1187 1181 ``m`` is an object conforming to ``imanifestdict``.
1188 1182
1189 1183 ``link`` is the linkrev revision number.
1190 1184
1191 1185 ``p1`` and ``p2`` are the parent revision numbers.
1192 1186
1193 1187 ``added`` and ``removed`` are iterables of added and removed paths,
1194 1188 respectively.
1195 1189
1196 1190 ``readtree`` is a function that can be used to read the child tree(s)
1197 1191 when recursively writing the full tree structure when using
1198 1192 treemanifests.
1199 1193
1200 1194 ``match`` is a matcher that can be used to hint to storage that not all
1201 1195 paths must be inspected; this is an optimization and can be safely
1202 1196 ignored. Note that the storage must still be able to reproduce a full
1203 1197 manifest including files that did not match.
1204 1198 """
1205 1199
1206 1200 class imanifestlog(interfaceutil.Interface):
1207 1201 """Interface representing a collection of manifest snapshots.
1208 1202
1209 1203 Represents the root manifest in a repository.
1210 1204
1211 1205 Also serves as a means to access nested tree manifests and to cache
1212 1206 tree manifests.
1213 1207 """
1214 1208
1215 1209 def __getitem__(node):
1216 1210 """Obtain a manifest instance for a given binary node.
1217 1211
1218 1212 Equivalent to calling ``self.get('', node)``.
1219 1213
1220 1214 The returned object conforms to the ``imanifestrevisionstored``
1221 1215 interface.
1222 1216 """
1223 1217
1224 1218 def get(tree, node, verify=True):
1225 1219 """Retrieve the manifest instance for a given directory and binary node.
1226 1220
1227 1221 ``node`` always refers to the node of the root manifest (which will be
1228 1222 the only manifest if flat manifests are being used).
1229 1223
1230 1224 If ``tree`` is the empty string, the root manifest is returned.
1231 1225 Otherwise the manifest for the specified directory will be returned
1232 1226 (requires tree manifests).
1233 1227
1234 1228 If ``verify`` is True, ``LookupError`` is raised if the node is not
1235 1229 known.
1236 1230
1237 1231 The returned object conforms to the ``imanifestrevisionstored``
1238 1232 interface.
1239 1233 """
1240 1234
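
Putting ``__getitem__`` and ``get()`` above together, a hypothetical caller (``repo`` and ``node`` are assumed to exist; the directory name is invented and requires tree manifests):

    mfl = repo.manifestlog               # conforms to imanifestlog
    rootmf = mfl[node]                   # same as mfl.get('', node)
    treemf = mfl.get('some/dir/', node)  # tree manifest for a subdirectory
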
1241 1235 def getstorage(tree):
1242 1236 """Retrieve an interface to storage for a particular tree.
1243 1237
1244 1238 If ``tree`` is the empty bytestring, storage for the root manifest will
1245 1239 be returned. Otherwise storage for a tree manifest is returned.
1246 1240
1247 1241 TODO formalize interface for returned object.
1248 1242 """
1249 1243
1250 1244 def clearcaches():
1251 1245 """Clear caches associated with this collection."""
1252 1246
1253 1247 def rev(node):
1254 1248 """Obtain the revision number for a binary node.
1255 1249
1256 1250 Raises ``error.LookupError`` if the node is not known.
1257 1251 """
1258 1252
1259 1253 class ilocalrepositoryfilestorage(interfaceutil.Interface):
1260 1254 """Local repository sub-interface providing access to tracked file storage.
1261 1255
1262 1256 This interface defines how a repository accesses storage for a single
1263 1257 tracked file path.
1264 1258 """
1265 1259
1266 1260 def file(f):
1267 1261 """Obtain a filelog for a tracked path.
1268 1262
1269 1263 The returned type conforms to the ``ifilestorage`` interface.
1270 1264 """
1271 1265
1272 1266 class ilocalrepositorymain(interfaceutil.Interface):
1273 1267 """Main interface for local repositories.
1274 1268
1275 1269 This currently captures the reality of things - not how things should be.
1276 1270 """
1277 1271
1278 1272 supportedformats = interfaceutil.Attribute(
1279 1273 """Set of requirements that apply to stream clone.
1280 1274
1281 1275 This is actually a class attribute and is shared among all instances.
1282 1276 """)
1283 1277
1284 1278 supported = interfaceutil.Attribute(
1285 1279 """Set of requirements that this repo is capable of opening.""")
1286 1280
1287 1281 requirements = interfaceutil.Attribute(
1288 1282 """Set of requirements this repo uses.""")
1289 1283
1290 1284 filtername = interfaceutil.Attribute(
1291 1285 """Name of the repoview that is active on this repo.""")
1292 1286
1293 1287 wvfs = interfaceutil.Attribute(
1294 1288 """VFS used to access the working directory.""")
1295 1289
1296 1290 vfs = interfaceutil.Attribute(
1297 1291 """VFS rooted at the .hg directory.
1298 1292
1299 1293 Used to access repository data not in the store.
1300 1294 """)
1301 1295
1302 1296 svfs = interfaceutil.Attribute(
1303 1297 """VFS rooted at the store.
1304 1298
1305 1299 Used to access repository data in the store. Typically .hg/store.
1306 1300 But can point elsewhere if the store is shared.
1307 1301 """)
1308 1302
1309 1303 root = interfaceutil.Attribute(
1310 1304 """Path to the root of the working directory.""")
1311 1305
1312 1306 path = interfaceutil.Attribute(
1313 1307 """Path to the .hg directory.""")
1314 1308
1315 1309 origroot = interfaceutil.Attribute(
1316 1310 """The filesystem path that was used to construct the repo.""")
1317 1311
1318 1312 auditor = interfaceutil.Attribute(
1319 1313 """A pathauditor for the working directory.
1320 1314
1321 1315 This checks if a path refers to a nested repository.
1322 1316
1323 1317 Operates on the filesystem.
1324 1318 """)
1325 1319
1326 1320 nofsauditor = interfaceutil.Attribute(
1327 1321 """A pathauditor for the working directory.
1328 1322
1329 1323 This is like ``auditor`` except it doesn't do filesystem checks.
1330 1324 """)
1331 1325
1332 1326 baseui = interfaceutil.Attribute(
1333 1327 """Original ui instance passed into constructor.""")
1334 1328
1335 1329 ui = interfaceutil.Attribute(
1336 1330 """Main ui instance for this instance.""")
1337 1331
1338 1332 sharedpath = interfaceutil.Attribute(
1339 1333 """Path to the .hg directory of the repo this repo was shared from.""")
1340 1334
1341 1335 store = interfaceutil.Attribute(
1342 1336 """A store instance.""")
1343 1337
1344 1338 spath = interfaceutil.Attribute(
1345 1339 """Path to the store.""")
1346 1340
1347 1341 sjoin = interfaceutil.Attribute(
1348 1342 """Alias to self.store.join.""")
1349 1343
1350 1344 cachevfs = interfaceutil.Attribute(
1351 1345 """A VFS used to access the cache directory.
1352 1346
1353 1347 Typically .hg/cache.
1354 1348 """)
1355 1349
1356 1350 filteredrevcache = interfaceutil.Attribute(
1357 1351 """Holds sets of revisions to be filtered.""")
1358 1352
1359 1353 names = interfaceutil.Attribute(
1360 1354 """A ``namespaces`` instance.""")
1361 1355
1362 1356 def close():
1363 1357 """Close the handle on this repository."""
1364 1358
1365 1359 def peer():
1366 1360 """Obtain an object conforming to the ``peer`` interface."""
1367 1361
1368 1362 def unfiltered():
1369 1363 """Obtain an unfiltered/raw view of this repo."""
1370 1364
1371 1365 def filtered(name, visibilityexceptions=None):
1372 1366 """Obtain a named view of this repository."""
1373 1367
1374 1368 obsstore = interfaceutil.Attribute(
1375 1369 """A store of obsolescence data.""")
1376 1370
1377 1371 changelog = interfaceutil.Attribute(
1378 1372 """A handle on the changelog revlog.""")
1379 1373
1380 1374 manifestlog = interfaceutil.Attribute(
1381 1375 """An instance conforming to the ``imanifestlog`` interface.
1382 1376
1383 1377 Provides access to manifests for the repository.
1384 1378 """)
1385 1379
1386 1380 dirstate = interfaceutil.Attribute(
1387 1381 """Working directory state.""")
1388 1382
1389 1383 narrowpats = interfaceutil.Attribute(
1390 1384 """Matcher patterns for this repository's narrowspec.""")
1391 1385
1392 1386 def narrowmatch():
1393 1387 """Obtain a matcher for the narrowspec."""
1394 1388
1395 1389 def setnarrowpats(newincludes, newexcludes):
1396 1390 """Define the narrowspec for this repository."""
1397 1391
1398 1392 def __getitem__(changeid):
1399 1393 """Try to resolve a changectx."""
1400 1394
1401 1395 def __contains__(changeid):
1402 1396 """Whether a changeset exists."""
1403 1397
1404 1398 def __nonzero__():
1405 1399 """Always returns True."""
1406 1400 return True
1407 1401
1408 1402 __bool__ = __nonzero__
1409 1403
1410 1404 def __len__():
1411 1405 """Returns the number of changesets in the repo."""
1412 1406
1413 1407 def __iter__():
1414 1408 """Iterate over revisions in the changelog."""
1415 1409
1416 1410 def revs(expr, *args):
1417 1411 """Evaluate a revset.
1418 1412
1419 1413 Emits revisions.
1420 1414 """
1421 1415
1422 1416 def set(expr, *args):
1423 1417 """Evaluate a revset.
1424 1418
1425 1419 Emits changectx instances.
1426 1420 """
1427 1421
1428 1422 def anyrevs(specs, user=False, localalias=None):
1429 1423 """Find revisions matching one of the given revsets."""
1430 1424
1431 1425 def url():
1432 1426 """Returns a string representing the location of this repo."""
1433 1427
1434 1428 def hook(name, throw=False, **args):
1435 1429 """Call a hook."""
1436 1430
1437 1431 def tags():
1438 1432 """Return a mapping of tag to node."""
1439 1433
1440 1434 def tagtype(tagname):
1441 1435 """Return the type of a given tag."""
1442 1436
1443 1437 def tagslist():
1444 1438 """Return a list of tags ordered by revision."""
1445 1439
1446 1440 def nodetags(node):
1447 1441 """Return the tags associated with a node."""
1448 1442
1449 1443 def nodebookmarks(node):
1450 1444 """Return the list of bookmarks pointing to the specified node."""
1451 1445
1452 1446 def branchmap():
1453 1447 """Return a mapping of branch to heads in that branch."""
1454 1448
1455 1449 def revbranchcache():
1456 1450 pass
1457 1451
1458 1452 def branchtip(branchtip, ignoremissing=False):
1459 1453 """Return the tip node for a given branch."""
1460 1454
1461 1455 def lookup(key):
1462 1456 """Resolve the node for a revision."""
1463 1457
1464 1458 def lookupbranch(key):
1465 1459 """Look up the branch name of the given revision or branch name."""
1466 1460
1467 1461 def known(nodes):
1468 1462 """Determine whether a series of nodes is known.
1469 1463
1470 1464 Returns a list of bools.
1471 1465 """
1472 1466
1473 1467 def local():
1474 1468 """Whether the repository is local."""
1475 1469 return True
1476 1470
1477 1471 def publishing():
1478 1472 """Whether the repository is a publishing repository."""
1479 1473
1480 1474 def cancopy():
1481 1475 pass
1482 1476
1483 1477 def shared():
1484 1478 """The type of shared repository or None."""
1485 1479
1486 1480 def wjoin(f, *insidef):
1487 1481 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
1488 1482
1489 1483 def setparents(p1, p2):
1490 1484 """Set the parent nodes of the working directory."""
1491 1485
1492 1486 def filectx(path, changeid=None, fileid=None):
1493 1487 """Obtain a filectx for the given file revision."""
1494 1488
1495 1489 def getcwd():
1496 1490 """Obtain the current working directory from the dirstate."""
1497 1491
1498 1492 def pathto(f, cwd=None):
1499 1493 """Obtain the relative path to a file."""
1500 1494
1501 1495 def adddatafilter(name, fltr):
1502 1496 pass
1503 1497
1504 1498 def wread(filename):
1505 1499 """Read a file from wvfs, using data filters."""
1506 1500
1507 1501 def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
1508 1502 """Write data to a file in the wvfs, using data filters."""
1509 1503
1510 1504 def wwritedata(filename, data):
1511 1505 """Resolve data for writing to the wvfs, using data filters."""
1512 1506
1513 1507 def currenttransaction():
1514 1508 """Obtain the current transaction instance or None."""
1515 1509
1516 1510 def transaction(desc, report=None):
1517 1511 """Open a new transaction to write to the repository."""
1518 1512
1519 1513 def undofiles():
1520 1514 """Returns a list of (vfs, path) for files to undo transactions."""
1521 1515
1522 1516 def recover():
1523 1517 """Roll back an interrupted transaction."""
1524 1518
1525 1519 def rollback(dryrun=False, force=False):
1526 1520 """Undo the last transaction.
1527 1521
1528 1522 DANGEROUS.
1529 1523 """
1530 1524
1531 1525 def updatecaches(tr=None, full=False):
1532 1526 """Warm repo caches."""
1533 1527
1534 1528 def invalidatecaches():
1535 1529 """Invalidate cached data due to the repository mutating."""
1536 1530
1537 1531 def invalidatevolatilesets():
1538 1532 pass
1539 1533
1540 1534 def invalidatedirstate():
1541 1535 """Invalidate the dirstate."""
1542 1536
1543 1537 def invalidate(clearfilecache=False):
1544 1538 pass
1545 1539
1546 1540 def invalidateall():
1547 1541 pass
1548 1542
1549 1543 def lock(wait=True):
1550 1544 """Lock the repository store and return a lock instance."""
1551 1545
1552 1546 def wlock(wait=True):
1553 1547 """Lock the non-store parts of the repository."""
1554 1548
1555 1549 def currentwlock():
1556 1550 """Return the wlock if it's held or None."""
1557 1551
1558 1552 def checkcommitpatterns(wctx, vdirs, match, status, fail):
1559 1553 pass
1560 1554
1561 1555 def commit(text='', user=None, date=None, match=None, force=False,
1562 1556 editor=False, extra=None):
1563 1557 """Add a new revision to the repository."""
1564 1558
1565 1559 def commitctx(ctx, error=False):
1566 1560 """Commit a commitctx instance to the repository."""
1567 1561
1568 1562 def destroying():
1569 1563 """Inform the repository that nodes are about to be destroyed."""
1570 1564
1571 1565 def destroyed():
1572 1566 """Inform the repository that nodes have been destroyed."""
1573 1567
1574 1568 def status(node1='.', node2=None, match=None, ignored=False,
1575 1569 clean=False, unknown=False, listsubrepos=False):
1576 1570 """Convenience method to call repo[x].status()."""
1577 1571
1578 1572 def addpostdsstatus(ps):
1579 1573 pass
1580 1574
1581 1575 def postdsstatus():
1582 1576 pass
1583 1577
1584 1578 def clearpostdsstatus():
1585 1579 pass
1586 1580
1587 1581 def heads(start=None):
1588 1582 """Obtain list of nodes that are DAG heads."""
1589 1583
1590 1584 def branchheads(branch=None, start=None, closed=False):
1591 1585 pass
1592 1586
1593 1587 def branches(nodes):
1594 1588 pass
1595 1589
1596 1590 def between(pairs):
1597 1591 pass
1598 1592
1599 1593 def checkpush(pushop):
1600 1594 pass
1601 1595
1602 1596 prepushoutgoinghooks = interfaceutil.Attribute(
1603 1597 """util.hooks instance.""")
1604 1598
1605 1599 def pushkey(namespace, key, old, new):
1606 1600 pass
1607 1601
1608 1602 def listkeys(namespace):
1609 1603 pass
1610 1604
1611 1605 def debugwireargs(one, two, three=None, four=None, five=None):
1612 1606 pass
1613 1607
1614 1608 def savecommitmessage(text):
1615 1609 pass
1616 1610
1617 1611 class completelocalrepository(ilocalrepositorymain,
1618 1612 ilocalrepositoryfilestorage):
1619 1613 """Complete interface for a local repository."""
@@ -1,741 +1,739
1 1 # simplestorerepo.py - Extension that swaps in alternate repository storage.
2 2 #
3 3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 # To use this with the test suite:
9 9 #
10 10 # $ HGREPOFEATURES="simplestore" ./run-tests.py \
11 11 # --extra-config-opt extensions.simplestore=`pwd`/simplestorerepo.py
12 12
13 13 from __future__ import absolute_import
14 14
15 15 import stat
16 16
17 17 from mercurial.i18n import _
18 18 from mercurial.node import (
19 19 bin,
20 20 hex,
21 21 nullid,
22 22 nullrev,
23 23 )
24 24 from mercurial.thirdparty import (
25 25 attr,
26 26 cbor,
27 27 )
28 28 from mercurial import (
29 29 ancestor,
30 30 bundlerepo,
31 31 error,
32 32 extensions,
33 33 localrepo,
34 34 mdiff,
35 35 pycompat,
36 36 repository,
37 37 revlog,
38 38 store,
39 39 verify,
40 40 )
41 41 from mercurial.utils import (
42 42 interfaceutil,
43 43 )
44 44
45 45 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
46 46 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
47 47 # be specifying the version(s) of Mercurial they are tested with, or
48 48 # leave the attribute unspecified.
49 49 testedwith = 'ships-with-hg-core'
50 50
51 51 REQUIREMENT = 'testonly-simplestore'
52 52
53 53 def validatenode(node):
54 54 if isinstance(node, int):
55 55 raise ValueError('expected node; got int')
56 56
57 57 if len(node) != 20:
58 58 raise ValueError('expected 20 byte node')
59 59
60 60 def validaterev(rev):
61 61 if not isinstance(rev, int):
62 62 raise ValueError('expected int')
63 63
64 64 class simplestoreerror(error.StorageError):
65 65 pass
66 66
67 67 @interfaceutil.implementer(repository.irevisiondelta)
68 68 @attr.s(slots=True, frozen=True)
69 69 class simplestorerevisiondelta(object):
70 70 node = attr.ib()
71 71 p1node = attr.ib()
72 72 p2node = attr.ib()
73 73 basenode = attr.ib()
74 74 linknode = attr.ib()
75 75 flags = attr.ib()
76 76 baserevisionsize = attr.ib()
77 77 revision = attr.ib()
78 78 delta = attr.ib()
79 79
80 80 @interfaceutil.implementer(repository.ifilestorage)
81 81 class filestorage(object):
82 82 """Implements storage for a tracked path.
83 83
84 84 Data is stored in the VFS in a directory corresponding to the tracked
85 85 path.
86 86
87 87 Index data is stored in an ``index`` file using CBOR.
88 88
89 89 Fulltext data is stored in files having names of the node.
90 90 """
91 91
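# Hypothetical on-disk layout for a tracked path 'foo/bar.txt' (placeholder
# name), following _storepath and _indexpath below:
#
#   data/foo/bar.txt/index        - CBOR-encoded list of revision metadata
#   data/foo/bar.txt/<hex(node)>  - raw fulltext, one file per revision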
92 92 def __init__(self, svfs, path):
93 93 self._svfs = svfs
94 94 self._path = path
95 95
96 96 self._storepath = b'/'.join([b'data', path])
97 97 self._indexpath = b'/'.join([self._storepath, b'index'])
98 98
99 99 indexdata = self._svfs.tryread(self._indexpath)
100 100 if indexdata:
101 101 indexdata = cbor.loads(indexdata)
102 102
103 103 self._indexdata = indexdata or []
104 104 self._indexbynode = {}
105 105 self._indexbyrev = {}
106 106 self.index = []
107 107 self._refreshindex()
108 108
109 109 # This is used by changegroup code :/
110 110 self._generaldelta = True
111 111
112 self.version = 1
113
114 112 def _refreshindex(self):
115 113 self._indexbynode.clear()
116 114 self._indexbyrev.clear()
117 115 self.index = []
118 116
119 117 for i, entry in enumerate(self._indexdata):
120 118 self._indexbynode[entry[b'node']] = entry
121 119 self._indexbyrev[i] = entry
122 120
123 121 self._indexbynode[nullid] = {
124 122 b'node': nullid,
125 123 b'p1': nullid,
126 124 b'p2': nullid,
127 125 b'linkrev': nullrev,
128 126 b'flags': 0,
129 127 }
130 128
131 129 self._indexbyrev[nullrev] = {
132 130 b'node': nullid,
133 131 b'p1': nullid,
134 132 b'p2': nullid,
135 133 b'linkrev': nullrev,
136 134 b'flags': 0,
137 135 }
138 136
139 137 for i, entry in enumerate(self._indexdata):
140 138 p1rev, p2rev = self.parentrevs(self.rev(entry[b'node']))
141 139
142 140 # start, length, rawsize, chainbase, linkrev, p1, p2, node
143 141 self.index.append((0, 0, 0, -1, entry[b'linkrev'], p1rev, p2rev,
144 142 entry[b'node']))
145 143
146 144 self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))
147 145
148 146 def __len__(self):
149 147 return len(self._indexdata)
150 148
151 149 def __iter__(self):
152 150 return iter(range(len(self)))
153 151
154 152 def revs(self, start=0, stop=None):
155 153 step = 1
156 154 if stop is not None:
157 155 if start > stop:
158 156 step = -1
159 157
160 158 stop += step
161 159 else:
162 160 stop = len(self)
163 161
164 162 return range(start, stop, step)
165 163
166 164 def parents(self, node):
167 165 validatenode(node)
168 166
169 167 if node not in self._indexbynode:
170 168 raise KeyError('unknown node')
171 169
172 170 entry = self._indexbynode[node]
173 171
174 172 return entry[b'p1'], entry[b'p2']
175 173
176 174 def parentrevs(self, rev):
177 175 p1, p2 = self.parents(self._indexbyrev[rev][b'node'])
178 176 return self.rev(p1), self.rev(p2)
179 177
180 178 def rev(self, node):
181 179 validatenode(node)
182 180
183 181 try:
184 182 self._indexbynode[node]
185 183 except KeyError:
186 184 raise error.LookupError(node, self._indexpath, _('no node'))
187 185
188 186 for rev, entry in self._indexbyrev.items():
189 187 if entry[b'node'] == node:
190 188 return rev
191 189
192 190 raise error.ProgrammingError('this should not occur')
193 191
194 192 def node(self, rev):
195 193 validaterev(rev)
196 194
197 195 return self._indexbyrev[rev][b'node']
198 196
199 197 def lookup(self, node):
200 198 if isinstance(node, int):
201 199 return self.node(node)
202 200
203 201 if len(node) == 20:
204 202 self.rev(node)
205 203 return node
206 204
207 205 try:
208 206 rev = int(node)
209 207 if '%d' % rev != node:
210 208 raise ValueError
211 209
212 210 if rev < 0:
213 211 rev = len(self) + rev
214 212 if rev < 0 or rev >= len(self):
215 213 raise ValueError
216 214
217 215 return self.node(rev)
218 216 except (ValueError, OverflowError):
219 217 pass
220 218
221 219 if len(node) == 40:
222 220 try:
223 221 rawnode = bin(node)
224 222 self.rev(rawnode)
225 223 return rawnode
226 224 except TypeError:
227 225 pass
228 226
229 227 raise error.LookupError(node, self._path, _('invalid lookup input'))
230 228
231 229 def linkrev(self, rev):
232 230 validaterev(rev)
233 231
234 232 return self._indexbyrev[rev][b'linkrev']
235 233
236 234 def flags(self, rev):
237 235 validaterev(rev)
238 236
239 237 return self._indexbyrev[rev][b'flags']
240 238
241 239 def deltaparent(self, rev):
242 240 validaterev(rev)
243 241
244 242 p1node = self.parents(self.node(rev))[0]
245 243 return self.rev(p1node)
246 244
247 245 def _candelta(self, baserev, rev):
248 246 validaterev(baserev)
249 247 validaterev(rev)
250 248
251 249 if ((self.flags(baserev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)
252 250 or (self.flags(rev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)):
253 251 return False
254 252
255 253 return True
256 254
257 255 def rawsize(self, rev):
258 256 validaterev(rev)
259 257 node = self.node(rev)
260 258 return len(self.revision(node, raw=True))
261 259
262 260 def _processflags(self, text, flags, operation, raw=False):
263 261 if flags == 0:
264 262 return text, True
265 263
266 264 if flags & ~revlog.REVIDX_KNOWN_FLAGS:
267 265 raise simplestoreerror(_("incompatible revision flag '%#x'") %
268 266 (flags & ~revlog.REVIDX_KNOWN_FLAGS))
269 267
270 268 validatehash = True
271 269 # Depending on the operation (read or write), the order might be
272 270 # reversed due to non-commutative transforms.
273 271 orderedflags = revlog.REVIDX_FLAGS_ORDER
274 272 if operation == 'write':
275 273 orderedflags = reversed(orderedflags)
276 274
277 275 for flag in orderedflags:
278 276 # If a flagprocessor has been registered for a known flag, apply the
279 277 # related operation transform and update result tuple.
280 278 if flag & flags:
281 279 vhash = True
282 280
283 281 if flag not in revlog._flagprocessors:
284 282 message = _("missing processor for flag '%#x'") % (flag)
285 283 raise simplestoreerror(message)
286 284
287 285 processor = revlog._flagprocessors[flag]
288 286 if processor is not None:
289 287 readtransform, writetransform, rawtransform = processor
290 288
291 289 if raw:
292 290 vhash = rawtransform(self, text)
293 291 elif operation == 'read':
294 292 text, vhash = readtransform(self, text)
295 293 else: # write operation
296 294 text, vhash = writetransform(self, text)
297 295 validatehash = validatehash and vhash
298 296
299 297 return text, validatehash
300 298
301 299 def checkhash(self, text, node, p1=None, p2=None, rev=None):
302 300 if p1 is None and p2 is None:
303 301 p1, p2 = self.parents(node)
304 302 if node != revlog.hash(text, p1, p2):
305 303 raise simplestoreerror(_("integrity check failed on %s") %
306 304 self._path)
307 305
308 306 def revision(self, node, raw=False):
309 307 validatenode(node)
310 308
311 309 if node == nullid:
312 310 return b''
313 311
314 312 rev = self.rev(node)
315 313 flags = self.flags(rev)
316 314
317 315 path = b'/'.join([self._storepath, hex(node)])
318 316 rawtext = self._svfs.read(path)
319 317
320 318 text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
321 319 if validatehash:
322 320 self.checkhash(text, node, rev=rev)
323 321
324 322 return text
325 323
326 324 def read(self, node):
327 325 validatenode(node)
328 326
329 327 revision = self.revision(node)
330 328
331 329 if not revision.startswith(b'\1\n'):
332 330 return revision
333 331
334 332 start = revision.index(b'\1\n', 2)
335 333 return revision[start + 2:]
336 334
337 335 def renamed(self, node):
338 336 validatenode(node)
339 337
340 338 if self.parents(node)[0] != nullid:
341 339 return False
342 340
343 341 fulltext = self.revision(node)
344 342 m = revlog.parsemeta(fulltext)[0]
345 343
346 344 if m and 'copy' in m:
347 345 return m['copy'], bin(m['copyrev'])
348 346
349 347 return False
350 348
351 349 def cmp(self, node, text):
352 350 validatenode(node)
353 351
354 352 t = text
355 353
356 354 if text.startswith(b'\1\n'):
357 355 t = b'\1\n\1\n' + text
358 356
359 357 p1, p2 = self.parents(node)
360 358
361 359 if revlog.hash(t, p1, p2) == node:
362 360 return False
363 361
364 362 if self.iscensored(self.rev(node)):
365 363 return text != b''
366 364
367 365 if self.renamed(node):
368 366 t2 = self.read(node)
369 367 return t2 != text
370 368
371 369 return True
372 370
373 371 def size(self, rev):
374 372 validaterev(rev)
375 373
376 374 node = self._indexbyrev[rev][b'node']
377 375
378 376 if self.renamed(node):
379 377 return len(self.read(node))
380 378
381 379 if self.iscensored(rev):
382 380 return 0
383 381
384 382 return len(self.revision(node))
385 383
386 384 def iscensored(self, rev):
387 385 validaterev(rev)
388 386
389 387 return self.flags(rev) & revlog.REVIDX_ISCENSORED
390 388
391 389 def commonancestorsheads(self, a, b):
392 390 validatenode(a)
393 391 validatenode(b)
394 392
395 393 a = self.rev(a)
396 394 b = self.rev(b)
397 395
398 396 ancestors = ancestor.commonancestorsheads(self.parentrevs, a, b)
399 397 return pycompat.maplist(self.node, ancestors)
400 398
401 399 def descendants(self, revs):
402 400 # This is a copy of revlog.descendants()
403 401 first = min(revs)
404 402 if first == nullrev:
405 403 for i in self:
406 404 yield i
407 405 return
408 406
409 407 seen = set(revs)
410 408 for i in self.revs(start=first + 1):
411 409 for x in self.parentrevs(i):
412 410 if x != nullrev and x in seen:
413 411 seen.add(i)
414 412 yield i
415 413 break
416 414
417 415 # Required by verify.
418 416 def files(self):
419 417 entries = self._svfs.listdir(self._storepath)
420 418
421 419 # Strip out undo.backup.* files created as part of transaction
422 420 # recording.
423 421 entries = [f for f in entries if not f.startswith('undo.backup.')]
424 422
425 423 return [b'/'.join((self._storepath, f)) for f in entries]
426 424
427 425 # Required by verify.
428 426 def checksize(self):
429 427 return 0, 0
430 428
431 429 def add(self, text, meta, transaction, linkrev, p1, p2):
432 430 if meta or text.startswith(b'\1\n'):
433 431 text = revlog.packmeta(meta, text)
434 432
435 433 return self.addrevision(text, transaction, linkrev, p1, p2)
436 434
437 435 def addrevision(self, text, transaction, linkrev, p1, p2, node=None,
438 436 flags=revlog.REVIDX_DEFAULT_FLAGS, cachedelta=None):
439 437 validatenode(p1)
440 438 validatenode(p2)
441 439
442 440 if flags:
443 441 node = node or revlog.hash(text, p1, p2)
444 442
445 443 rawtext, validatehash = self._processflags(text, flags, 'write')
446 444
447 445 node = node or revlog.hash(text, p1, p2)
448 446
449 447 if node in self._indexbynode:
450 448 return node
451 449
452 450 if validatehash:
453 451 self.checkhash(rawtext, node, p1=p1, p2=p2)
454 452
455 453 return self._addrawrevision(node, rawtext, transaction, linkrev, p1, p2,
456 454 flags)
457 455
458 456 def _addrawrevision(self, node, rawtext, transaction, link, p1, p2, flags):
459 457 transaction.addbackup(self._indexpath)
460 458
461 459 path = b'/'.join([self._storepath, hex(node)])
462 460
463 461 self._svfs.write(path, rawtext)
464 462
465 463 self._indexdata.append({
466 464 b'node': node,
467 465 b'p1': p1,
468 466 b'p2': p2,
469 467 b'linkrev': link,
470 468 b'flags': flags,
471 469 })
472 470
473 471 self._reflectindexupdate()
474 472
475 473 return node
476 474
477 475 def _reflectindexupdate(self):
478 476 self._refreshindex()
479 477 self._svfs.write(self._indexpath, cbor.dumps(self._indexdata))
480 478
481 479 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
482 480 nodes = []
483 481
484 482 transaction.addbackup(self._indexpath)
485 483
486 484 for node, p1, p2, linknode, deltabase, delta, flags in deltas:
487 485 linkrev = linkmapper(linknode)
488 486 flags = flags or revlog.REVIDX_DEFAULT_FLAGS
489 487
490 488 nodes.append(node)
491 489
492 490 if node in self._indexbynode:
493 491 continue
494 492
495 493 # Need to resolve the fulltext from the delta base.
496 494 if deltabase == nullid:
497 495 text = mdiff.patch(b'', delta)
498 496 else:
499 497 text = mdiff.patch(self.revision(deltabase), delta)
500 498
501 499 self._addrawrevision(node, text, transaction, linkrev, p1, p2,
502 500 flags)
503 501
504 502 if addrevisioncb:
505 503 addrevisioncb(self, node)
506 504
507 505 return nodes
508 506
509 507 def revdiff(self, rev1, rev2):
510 508 validaterev(rev1)
511 509 validaterev(rev2)
512 510
513 511 node1 = self.node(rev1)
514 512 node2 = self.node(rev2)
515 513
516 514 return mdiff.textdiff(self.revision(node1, raw=True),
517 515 self.revision(node2, raw=True))
518 516
519 517 def emitrevisiondeltas(self, requests):
520 518 for request in requests:
521 519 node = request.node
522 520 rev = self.rev(node)
523 521
524 522 if request.basenode == nullid:
525 523 baserev = nullrev
526 524 elif request.basenode is not None:
527 525 baserev = self.rev(request.basenode)
528 526 else:
529 527 # This is a test extension and we can do simple things
530 528 # for choosing a delta parent.
531 529 baserev = self.deltaparent(rev)
532 530
533 531 if baserev != nullrev and not self._candelta(baserev, rev):
534 532 baserev = nullrev
535 533
536 534 revision = None
537 535 delta = None
538 536 baserevisionsize = None
539 537
540 538 if self.iscensored(baserev) or self.iscensored(rev):
541 539 try:
542 540 revision = self.revision(node, raw=True)
543 541 except error.CensoredNodeError as e:
544 542 revision = e.tombstone
545 543
546 544 if baserev != nullrev:
547 545 baserevisionsize = self.rawsize(baserev)
548 546
549 547 elif baserev == nullrev:
550 548 revision = self.revision(node, raw=True)
551 549 else:
552 550 delta = self.revdiff(baserev, rev)
553 551
554 552 extraflags = revlog.REVIDX_ELLIPSIS if request.ellipsis else 0
555 553
556 554 yield simplestorerevisiondelta(
557 555 node=node,
558 556 p1node=request.p1node,
559 557 p2node=request.p2node,
560 558 linknode=request.linknode,
561 559 basenode=self.node(baserev),
562 560 flags=self.flags(rev) | extraflags,
563 561 baserevisionsize=baserevisionsize,
564 562 revision=revision,
565 563 delta=delta)
566 564
567 565 def heads(self, start=None, stop=None):
568 566 # This is copied from revlog.py.
569 567 if start is None and stop is None:
570 568 if not len(self):
571 569 return [nullid]
572 570 return [self.node(r) for r in self.headrevs()]
573 571
574 572 if start is None:
575 573 start = nullid
576 574 if stop is None:
577 575 stop = []
578 576 stoprevs = set([self.rev(n) for n in stop])
579 577 startrev = self.rev(start)
580 578 reachable = {startrev}
581 579 heads = {startrev}
582 580
583 581 parentrevs = self.parentrevs
584 582 for r in self.revs(start=startrev + 1):
585 583 for p in parentrevs(r):
586 584 if p in reachable:
587 585 if r not in stoprevs:
588 586 reachable.add(r)
589 587 heads.add(r)
590 588 if p in heads and p not in stoprevs:
591 589 heads.remove(p)
592 590
593 591 return [self.node(r) for r in heads]
594 592
595 593 def children(self, node):
596 594 validatenode(node)
597 595
598 596 # This is a copy of revlog.children().
599 597 c = []
600 598 p = self.rev(node)
601 599 for r in self.revs(start=p + 1):
602 600 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
603 601 if prevs:
604 602 for pr in prevs:
605 603 if pr == p:
606 604 c.append(self.node(r))
607 605 elif p == nullrev:
608 606 c.append(self.node(r))
609 607 return c
610 608
611 609 def getstrippoint(self, minlink):
612 610
613 611 # This is largely a copy of revlog.getstrippoint().
614 612 brokenrevs = set()
615 613 strippoint = len(self)
616 614
617 615 heads = {}
618 616 futurelargelinkrevs = set()
619 617 for head in self.heads():
620 618 headlinkrev = self.linkrev(self.rev(head))
621 619 heads[head] = headlinkrev
622 620 if headlinkrev >= minlink:
623 621 futurelargelinkrevs.add(headlinkrev)
624 622
625 623 # This algorithm involves walking down the rev graph, starting at the
626 624 # heads. Since the revs are topologically sorted according to linkrev,
627 625 # once all head linkrevs are below the minlink, we know there are
628 626 # no more revs that could have a linkrev greater than minlink.
629 627 # So we can stop walking.
630 628 while futurelargelinkrevs:
631 629 strippoint -= 1
632 630 linkrev = heads.pop(strippoint)
633 631
634 632 if linkrev < minlink:
635 633 brokenrevs.add(strippoint)
636 634 else:
637 635 futurelargelinkrevs.remove(linkrev)
638 636
639 637 for p in self.parentrevs(strippoint):
640 638 if p != nullrev:
641 639 plinkrev = self.linkrev(p)
642 640 heads[p] = plinkrev
643 641 if plinkrev >= minlink:
644 642 futurelargelinkrevs.add(plinkrev)
645 643
646 644 return strippoint, brokenrevs
647 645
648 646 def strip(self, minlink, transaction):
649 647 if not len(self):
650 648 return
651 649
652 650 rev, _ignored = self.getstrippoint(minlink)
653 651 if rev == len(self):
654 652 return
655 653
656 654 # Purge index data starting at the requested revision.
657 655 self._indexdata[rev:] = []
658 656 self._reflectindexupdate()
659 657
660 658 def issimplestorefile(f, kind, st):
661 659 if kind != stat.S_IFREG:
662 660 return False
663 661
664 662 if store.isrevlog(f, kind, st):
665 663 return False
666 664
667 665 # Ignore transaction undo files.
668 666 if f.startswith('undo.'):
669 667 return False
670 668
671 669 # Otherwise assume it belongs to the simple store.
672 670 return True
673 671
674 672 class simplestore(store.encodedstore):
675 673 def datafiles(self):
676 674 for x in super(simplestore, self).datafiles():
677 675 yield x
678 676
679 677 # Supplement with non-revlog files.
680 678 extrafiles = self._walk('data', True, filefilter=issimplestorefile)
681 679
682 680 for unencoded, encoded, size in extrafiles:
683 681 try:
684 682 unencoded = store.decodefilename(unencoded)
685 683 except KeyError:
686 684 unencoded = None
687 685
688 686 yield unencoded, encoded, size
689 687
690 688 def reposetup(ui, repo):
691 689 if not repo.local():
692 690 return
693 691
694 692 if isinstance(repo, bundlerepo.bundlerepository):
695 693 raise error.Abort(_('cannot use simple store with bundlerepo'))
696 694
697 695 class simplestorerepo(repo.__class__):
698 696 def file(self, f):
699 697 return filestorage(self.svfs, f)
700 698
701 699 repo.__class__ = simplestorerepo
702 700
703 701 def featuresetup(ui, supported):
704 702 supported.add(REQUIREMENT)
705 703
706 704 def newreporequirements(orig, ui):
707 705 """Modifies default requirements for new repos to use the simple store."""
708 706 requirements = orig(ui)
709 707
710 708 # These requirements are only used to affect creation of the store
711 709 # object. We have our own store. So we can remove them.
712 710 # TODO do this once we feel like taking the test hit.
713 711 #if 'fncache' in requirements:
714 712 # requirements.remove('fncache')
715 713 #if 'dotencode' in requirements:
716 714 # requirements.remove('dotencode')
717 715
718 716 requirements.add(REQUIREMENT)
719 717
720 718 return requirements
721 719
722 720 def makestore(orig, requirements, path, vfstype):
723 721 if REQUIREMENT not in requirements:
724 722 return orig(requirements, path, vfstype)
725 723
726 724 return simplestore(path, vfstype)
727 725
728 726 def verifierinit(orig, self, *args, **kwargs):
729 727 orig(self, *args, **kwargs)
730 728
731 729 # We don't care that files in the store don't align with what is
732 730 # advertised. So suppress these warnings.
733 731 self.warnorphanstorefiles = False
734 732
735 733 def extsetup(ui):
736 734 localrepo.featuresetupfuncs.add(featuresetup)
737 735
738 736 extensions.wrapfunction(localrepo, 'newreporequirements',
739 737 newreporequirements)
740 738 extensions.wrapfunction(store, 'store', makestore)
741 739 extensions.wrapfunction(verify.verifier, '__init__', verifierinit)
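Beyond the test-suite invocation shown at the top of the file, a hedged sketch of enabling the extension for a regular repository (the path is a placeholder):

    [extensions]
    simplestore = /path/to/simplestorerepo.py

New repositories created with this configuration gain the 'testonly-simplestore' requirement, so they can only be reopened while the extension remains enabled.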