# Provenance: Mercurial changeset r39270:3682b49e (branch: default)
# Author: Gregory Szorc
# Commit message: "repository: remove candelta() from ifileindex..."
# First file in this diff: mercurial/filelog.py (271 -> 268 lines)
1 1 # filelog.py - file history class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from . import (
11 11 error,
12 12 repository,
13 13 revlog,
14 14 )
15 15 from .utils import (
16 16 interfaceutil,
17 17 )
18 18
@interfaceutil.implementer(repository.ifilestorage)
class filelog(object):
    """Storage for the revision history of a single tracked file.

    This is a thin facade over a ``revlog`` instance: nearly every method
    delegates to ``self._revlog``.  The file-specific logic lives in
    ``read()``, ``add()``, ``renamed()``, ``size()`` and ``cmp()``, which
    understand the optional copy-metadata header (``\\1\\n...\\1\\n``) that
    may prefix a revision's fulltext.
    """

    def __init__(self, opener, path):
        # Revision data lives under the store at data/<path>.i (+ .d).
        self._revlog = revlog.revlog(opener,
                                     '/'.join(('data', path + '.i')),
                                     censorable=True)
        # full name of the user visible file, relative to the repository root
        # NOTE: this assignment goes through the ``filename`` property below,
        # which stores the value on the underlying revlog.
        self.filename = path
        self.index = self._revlog.index
        self.version = self._revlog.version
        self._generaldelta = self._revlog._generaldelta

    # -- index delegation -------------------------------------------------

    def __len__(self):
        return len(self._revlog)

    def __iter__(self):
        return self._revlog.__iter__()

    def revs(self, start=0, stop=None):
        return self._revlog.revs(start=start, stop=stop)

    def parents(self, node):
        return self._revlog.parents(node)

    def parentrevs(self, rev):
        return self._revlog.parentrevs(rev)

    def rev(self, node):
        return self._revlog.rev(node)

    def node(self, rev):
        return self._revlog.node(rev)

    def lookup(self, node):
        return self._revlog.lookup(node)

    def linkrev(self, rev):
        return self._revlog.linkrev(rev)

    def flags(self, rev):
        return self._revlog.flags(rev)

    def commonancestorsheads(self, node1, node2):
        return self._revlog.commonancestorsheads(node1, node2)

    def descendants(self, revs):
        return self._revlog.descendants(revs)

    def headrevs(self):
        return self._revlog.headrevs()

    def heads(self, start=None, stop=None):
        return self._revlog.heads(start, stop)

    def children(self, node):
        return self._revlog.children(node)

    def deltaparent(self, rev):
        return self._revlog.deltaparent(rev)

    def iscensored(self, rev):
        return self._revlog.iscensored(rev)

    # -- data delegation --------------------------------------------------

    def rawsize(self, rev):
        return self._revlog.rawsize(rev)

    def checkhash(self, text, node, p1=None, p2=None, rev=None):
        return self._revlog.checkhash(text, node, p1=p1, p2=p2, rev=rev)

    def revision(self, node, _df=None, raw=False):
        return self._revlog.revision(node, _df=_df, raw=raw)

    def revdiff(self, rev1, rev2):
        return self._revlog.revdiff(rev1, rev2)

    def emitrevisiondeltas(self, requests):
        return self._revlog.emitrevisiondeltas(requests)

    # -- mutation delegation ----------------------------------------------

    def addrevision(self, revisiondata, transaction, linkrev, p1, p2,
                    node=None, flags=revlog.REVIDX_DEFAULT_FLAGS,
                    cachedelta=None):
        return self._revlog.addrevision(revisiondata, transaction, linkrev,
                                        p1, p2, node=node, flags=flags,
                                        cachedelta=cachedelta)

    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
        return self._revlog.addgroup(deltas, linkmapper, transaction,
                                     addrevisioncb=addrevisioncb)

    def getstrippoint(self, minlink):
        return self._revlog.getstrippoint(minlink)

    def strip(self, minlink, transaction):
        return self._revlog.strip(minlink, transaction)

    def files(self):
        return self._revlog.files()

    def checksize(self):
        return self._revlog.checksize()

    # -- file-specific logic (copy metadata aware) -------------------------

    def read(self, node):
        """Return the file data for ``node``, stripping any metadata header."""
        t = self.revision(node)
        if not t.startswith('\1\n'):
            return t
        s = t.index('\1\n', 2)
        return t[s + 2:]

    def add(self, text, meta, transaction, link, p1=None, p2=None):
        """Add a revision, packing ``meta`` into a header when needed.

        A header is also forced when the raw text itself starts with the
        ``\\1\\n`` marker, so it cannot be mistaken for metadata on read.
        """
        if meta or text.startswith('\1\n'):
            text = revlog.packmeta(meta, text)
        return self.addrevision(text, transaction, link, p1, p2)

    def renamed(self, node):
        """Return (copied-from-path, copied-from-node) or ``False``.

        Copy metadata can only exist on revisions whose first parent is null.
        """
        if self.parents(node)[0] != revlog.nullid:
            return False
        t = self.revision(node)
        m = revlog.parsemeta(t)[0]
        # copy and copyrev occur in pairs. In rare cases due to bugs,
        # one can occur without the other.
        if m and "copy" in m and "copyrev" in m:
            return (m["copy"], revlog.bin(m["copyrev"]))
        return False

    def size(self, rev):
        """return the size of a given revision"""

        # for revisions with renames, we have to go the slow way
        node = self.node(rev)
        if self.renamed(node):
            return len(self.read(node))
        if self.iscensored(rev):
            return 0

        # XXX if self.read(node).startswith("\1\n"), this returns (size+4)
        return self._revlog.size(rev)

    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different than what is stored.
        """

        t = text
        if text.startswith('\1\n'):
            # prepend an empty metadata header so the stored hash matches
            t = '\1\n\1\n' + text

        samehashes = not self._revlog.cmp(node, t)
        if samehashes:
            return False

        # censored files compare against the empty file
        if self.iscensored(self.rev(node)):
            return text != ''

        # renaming a file produces a different hash, even if the data
        # remains unchanged. Check if it's the case (slow):
        if self.renamed(node):
            t2 = self.read(node)
            return t2 != text

        return True

    @property
    def filename(self):
        return self._revlog.filename

    @filename.setter
    def filename(self, value):
        self._revlog.filename = value

    # TODO these aren't part of the interface and aren't internal methods.
    # Callers should be fixed to not use them.
    @property
    def indexfile(self):
        return self._revlog.indexfile

    @indexfile.setter
    def indexfile(self, value):
        self._revlog.indexfile = value

    @property
    def datafile(self):
        return self._revlog.datafile

    @property
    def opener(self):
        return self._revlog.opener

    @property
    def _lazydeltabase(self):
        return self._revlog._lazydeltabase

    @_lazydeltabase.setter
    def _lazydeltabase(self, value):
        self._revlog._lazydeltabase = value

    @property
    def _deltabothparents(self):
        return self._revlog._deltabothparents

    @_deltabothparents.setter
    def _deltabothparents(self, value):
        self._revlog._deltabothparents = value

    @property
    def _inline(self):
        return self._revlog._inline

    @property
    def _withsparseread(self):
        # older revlog implementations may lack this attribute
        return getattr(self._revlog, '_withsparseread', False)

    @property
    def _srmingapsize(self):
        return self._revlog._srmingapsize

    @property
    def _srdensitythreshold(self):
        return self._revlog._srdensitythreshold

    def _deltachain(self, rev, stoprev=None):
        return self._revlog._deltachain(rev, stoprev)

    def chainbase(self, rev):
        return self._revlog.chainbase(rev)

    def chainlen(self, rev):
        return self._revlog.chainlen(rev)

    def clone(self, tr, destrevlog, **kwargs):
        if not isinstance(destrevlog, filelog):
            raise error.ProgrammingError('expected filelog to clone()')

        return self._revlog.clone(tr, destrevlog._revlog, **kwargs)

    def start(self, rev):
        return self._revlog.start(rev)

    def end(self, rev):
        return self._revlog.end(rev)

    def length(self, rev):
        return self._revlog.length(rev)

    def compress(self, data):
        return self._revlog.compress(data)

    def _addrevision(self, *args, **kwargs):
        return self._revlog._addrevision(*args, **kwargs)
# Second file in this diff: mercurial/repository.py (1403 -> 1400 lines)
1 1 # repository.py - Interfaces and base classes for repositories and peers.
2 2 #
3 3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from .i18n import _
11 11 from . import (
12 12 error,
13 13 )
14 14 from .utils import (
15 15 interfaceutil,
16 16 )
17 17
18 18 # When narrowing is finalized and no longer subject to format changes,
19 19 # we should move this to just "narrow" or similar.
20 20 NARROW_REQUIREMENT = 'narrowhg-experimental'
21 21
class ipeerconnection(interfaceutil.Interface):
    """Represents a "connection" to a repository.

    This is the base interface for representing a connection to a repository.
    It holds basic properties and methods applicable to all peer types.

    This is not a complete interface definition and should not be used
    outside of this module.
    """
    ui = interfaceutil.Attribute("""ui.ui instance""")

    def url():
        """Returns a URL string representing this peer.

        Currently, implementations expose the raw URL used to construct the
        instance. It may contain credentials as part of the URL. The
        expectations of the value aren't well-defined and this could lead to
        data leakage.

        TODO audit/clean consumers and more clearly define the contents of
        this value.
        """

    def local():
        """Returns a local repository instance.

        If the peer represents a local repository, returns an object that
        can be used to interface with it. Otherwise returns ``None``.
        """

    def peer():
        """Returns an object conforming to this interface.

        Most implementations will ``return self``.
        """

    def canpush():
        """Returns a boolean indicating if this peer can be pushed to."""

    def close():
        """Close the connection to this peer.

        This is called when the peer will no longer be used. Resources
        associated with the peer should be cleaned up.
        """
67 67
class ipeercapabilities(interfaceutil.Interface):
    """Peer sub-interface related to capabilities."""

    def capable(name):
        """Determine support for a named capability.

        Returns ``False`` if capability not supported.

        Returns ``True`` if boolean capability is supported. Returns a string
        if capability support is non-boolean.

        Capability strings may or may not map to wire protocol capabilities.
        """

    def requirecap(name, purpose):
        """Require a capability to be present.

        Raises a ``CapabilityError`` if the capability isn't present.
        """
87 87
class ipeercommands(interfaceutil.Interface):
    """Client-side interface for communicating over the wire protocol.

    This interface is used as a gateway to the Mercurial wire protocol.
    methods commonly call wire protocol commands of the same name.
    """

    def branchmap():
        """Obtain heads in named branches.

        Returns a dict mapping branch name to an iterable of nodes that are
        heads on that branch.
        """

    def capabilities():
        """Obtain capabilities of the peer.

        Returns a set of string capabilities.
        """

    def clonebundles():
        """Obtains the clone bundles manifest for the repo.

        Returns the manifest as unparsed bytes.
        """

    def debugwireargs(one, two, three=None, four=None, five=None):
        """Used to facilitate debugging of arguments passed over the wire."""

    def getbundle(source, **kwargs):
        """Obtain remote repository data as a bundle.

        This command is how the bulk of repository data is transferred from
        the peer to the local repository

        Returns a generator of bundle data.
        """

    def heads():
        """Determine all known head revisions in the peer.

        Returns an iterable of binary nodes.
        """

    def known(nodes):
        """Determine whether multiple nodes are known.

        Accepts an iterable of nodes whose presence to check for.

        Returns an iterable of booleans indicating of the corresponding node
        at that index is known to the peer.
        """

    def listkeys(namespace):
        """Obtain all keys in a pushkey namespace.

        Returns an iterable of key names.
        """

    def lookup(key):
        """Resolve a value to a known revision.

        Returns a binary node of the resolved revision on success.
        """

    def pushkey(namespace, key, old, new):
        """Set a value using the ``pushkey`` protocol.

        Arguments correspond to the pushkey namespace and key to operate on
        and the old and new values for that key.

        Returns a string with the peer result. The value inside varies by the
        namespace.
        """

    def stream_out():
        """Obtain streaming clone data.

        Successful result should be a generator of data chunks.
        """

    def unbundle(bundle, heads, url):
        """Transfer repository data to the peer.

        This is how the bulk of data during a push is transferred.

        Returns the integer number of heads added to the peer.
        """
176 176
class ipeerlegacycommands(interfaceutil.Interface):
    """Interface for implementing support for legacy wire protocol commands.

    Wire protocol commands transition to legacy status when they are no
    longer used by modern clients. To facilitate identifying which commands
    are legacy, the interfaces are split.
    """

    def between(pairs):
        """Obtain nodes between pairs of nodes.

        ``pairs`` is an iterable of node pairs.

        Returns an iterable of iterables of nodes corresponding to each
        requested pair.
        """

    def branches(nodes):
        """Obtain ancestor changesets of specific nodes back to a branch
        point.

        For each requested node, the peer finds the first ancestor node that
        is a DAG root or is a merge.

        Returns an iterable of iterables with the resolved values for each
        node.
        """

    def changegroup(nodes, source):
        """Obtain a changegroup with data for descendants of specified
        nodes."""

    def changegroupsubset(bases, heads, source):
        pass
208 208
class ipeercommandexecutor(interfaceutil.Interface):
    """Represents a mechanism to execute remote commands.

    This is the primary interface for requesting that wire protocol commands
    be executed. Instances of this interface are active in a context manager
    and have a well-defined lifetime. When the context manager exits, all
    outstanding requests are waited on.
    """

    def callcommand(name, args):
        """Request that a named command be executed.

        Receives the command name and a dictionary of command arguments.

        Returns a ``concurrent.futures.Future`` that will resolve to the
        result of that command request. That exact value is left up to
        the implementation and possibly varies by command.

        Not all commands can coexist with other commands in an executor
        instance: it depends on the underlying wire protocol transport being
        used and the command itself.

        Implementations MAY call ``sendcommands()`` automatically if the
        requested command can not coexist with other commands in this
        executor.

        Implementations MAY call ``sendcommands()`` automatically when the
        future's ``result()`` is called. So, consumers using multiple
        commands with an executor MUST ensure that ``result()`` is not called
        until all command requests have been issued.
        """

    def sendcommands():
        """Trigger submission of queued command requests.

        Not all transports submit commands as soon as they are requested to
        run. When called, this method forces queued command requests to be
        issued. It will no-op if all commands have already been sent.

        When called, no more new commands may be issued with this executor.
        """

    def close():
        """Signal that this command request is finished.

        When called, no more new commands may be issued. All outstanding
        commands that have previously been issued are waited on before
        returning. This not only includes waiting for the futures to resolve,
        but also waiting for all response data to arrive. In other words,
        calling this waits for all on-wire state for issued command requests
        to finish.

        When used as a context manager, this method is called when exiting
        the context manager.

        This method may call ``sendcommands()`` if there are buffered
        commands.
        """
265 265
class ipeerrequests(interfaceutil.Interface):
    """Interface for executing commands on a peer."""

    def commandexecutor():
        """A context manager that resolves to an ipeercommandexecutor.

        The object this resolves to can be used to issue command requests
        to the peer.

        Callers should call its ``callcommand`` method to issue command
        requests.

        A new executor should be obtained for each distinct set of commands
        (possibly just a single command) that the consumer wants to execute
        as part of a single operation or round trip. This is because some
        peers are half-duplex and/or don't support persistent connections.
        e.g. in the case of HTTP peers, commands sent to an executor
        represent a single HTTP request. While some peers may support
        multiple command sends over the wire per executor, consumers need to
        code to the least capable peer. So it should be assumed that command
        executors buffer called commands until they are told to send them
        and that each command executor could result in a new connection or
        wire-level request being issued.
        """
290 290
class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
    """Unified interface for peer repositories.

    All peer instances must conform to this interface.
    """
296 296
@interfaceutil.implementer(ipeerbase)
class peer(object):
    """Base class for peer repositories.

    Provides default implementations of the capability-related methods;
    subclasses supply ``capabilities()``.
    """

    def capable(self, name):
        caps = self.capabilities()
        if name in caps:
            # plain boolean capability
            return True

        # non-boolean capabilities are encoded as "name=value" strings;
        # return the value portion when present
        name = '%s=' % name
        for cap in caps:
            if cap.startswith(name):
                return cap[len(name):]

        return False

    def requirecap(self, name, purpose):
        if self.capable(name):
            return

        raise error.CapabilityError(
            _('cannot %s; remote repository does not support the %r '
              'capability') % (purpose, name))
320 320
class irevisiondelta(interfaceutil.Interface):
    """Represents a delta between one revision and another.

    Instances convey enough information to allow a revision to be exchanged
    with another repository.

    Instances represent the fulltext revision data or a delta against
    another revision. Therefore the ``revision`` and ``delta`` attributes
    are mutually exclusive.

    Typically used for changegroup generation.
    """

    node = interfaceutil.Attribute(
        """20 byte node of this revision.""")

    p1node = interfaceutil.Attribute(
        """20 byte node of 1st parent of this revision.""")

    p2node = interfaceutil.Attribute(
        """20 byte node of 2nd parent of this revision.""")

    linknode = interfaceutil.Attribute(
        """20 byte node of the changelog revision this node is linked to.""")

    flags = interfaceutil.Attribute(
        """2 bytes of integer flags that apply to this revision.""")

    basenode = interfaceutil.Attribute(
        """20 byte node of the revision this data is a delta against.

        ``nullid`` indicates that the revision is a full revision and not
        a delta.
        """)

    baserevisionsize = interfaceutil.Attribute(
        """Size of base revision this delta is against.

        May be ``None`` if ``basenode`` is ``nullid``.
        """)

    revision = interfaceutil.Attribute(
        """Raw fulltext of revision data for this node.""")

    delta = interfaceutil.Attribute(
        """Delta between ``basenode`` and ``node``.

        Stored in the bdiff delta format.
        """)
370 370
class irevisiondeltarequest(interfaceutil.Interface):
    """Represents a request to generate an ``irevisiondelta``."""

    node = interfaceutil.Attribute(
        """20 byte node of revision being requested.""")

    p1node = interfaceutil.Attribute(
        """20 byte node of 1st parent of revision.""")

    p2node = interfaceutil.Attribute(
        """20 byte node of 2nd parent of revision.""")

    linknode = interfaceutil.Attribute(
        """20 byte node to store in ``linknode`` attribute.""")

    basenode = interfaceutil.Attribute(
        """Base revision that delta should be generated against.

        If ``nullid``, the derived ``irevisiondelta`` should have its
        ``revision`` field populated and no delta should be generated.

        If ``None``, the delta may be generated against any revision that
        is an ancestor of this revision. Or a full revision may be used.

        If any other value, the delta should be produced against that
        revision.
        """)

    ellipsis = interfaceutil.Attribute(
        """Boolean on whether the ellipsis flag should be set.""")
401 401
class ifilerevisionssequence(interfaceutil.Interface):
    """Contains index data for all revisions of a file.

    Types implementing this behave like lists of tuples. The index
    in the list corresponds to the revision number. The values contain
    index metadata.

    The *null* revision (revision number -1) is always the last item
    in the index.
    """

    def __len__():
        """The total number of revisions."""

    def __getitem__(rev):
        """Returns the object having a specific revision number.

        Returns an 8-tuple with the following fields:

        offset+flags
           Contains the offset and flags for the revision. 64-bit unsigned
           integer where first 6 bytes are the offset and the next 2 bytes
           are flags. The offset can be 0 if it is not used by the store.
        compressed size
           Size of the revision data in the store. It can be 0 if it isn't
           needed by the store.
        uncompressed size
           Fulltext size. It can be 0 if it isn't needed by the store.
        base revision
           Revision number of revision the delta for storage is encoded
           against. -1 indicates not encoded against a base revision.
        link revision
           Revision number of changelog revision this entry is related to.
        p1 revision
           Revision number of 1st parent. -1 if no 1st parent.
        p2 revision
           Revision number of 2nd parent. -1 if no 1st parent.
        node
           Binary node value for this revision number.

        Negative values should index off the end of the sequence. ``-1``
        should return the null revision. ``-2`` should return the most
        recent revision.
        """

    def __contains__(rev):
        """Whether a revision number exists."""

    def insert(self, i, entry):
        """Add an item to the index at specific revision."""
452 452
class ifileindex(interfaceutil.Interface):
    """Storage interface for index data of a single file.

    File storage data is divided into index metadata and data storage.
    This interface defines the index portion of the interface.

    The index logically consists of:

    * A mapping between revision numbers and nodes.
    * DAG data (storing and querying the relationship between nodes).
    * Metadata to facilitate storage.
    """
    index = interfaceutil.Attribute(
        """An ``ifilerevisionssequence`` instance.""")

    def __len__():
        """Obtain the number of revisions stored for this file."""

    def __iter__():
        """Iterate over revision numbers for this file."""

    def revs(start=0, stop=None):
        """Iterate over revision numbers for this file, with control."""

    def parents(node):
        """Returns a 2-tuple of parent nodes for a revision.

        Values will be ``nullid`` if the parent is empty.
        """

    def parentrevs(rev):
        """Like parents() but operates on revision numbers."""

    def rev(node):
        """Obtain the revision number given a node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def node(rev):
        """Obtain the node value given a revision number.

        Raises ``IndexError`` if the node is not known.
        """

    def lookup(node):
        """Attempt to resolve a value to a node.

        Value can be a binary node, hex node, revision number, or a string
        that can be converted to an integer.

        Raises ``error.LookupError`` if a node could not be resolved.
        """

    def linkrev(rev):
        """Obtain the changeset revision number a revision is linked to."""

    def flags(rev):
        """Obtain flags used to affect storage of a revision."""

    def iscensored(rev):
        """Return whether a revision's content has been censored."""

    def commonancestorsheads(node1, node2):
        """Obtain an iterable of nodes containing heads of common ancestors.

        See ``ancestor.commonancestorsheads()``.
        """

    def descendants(revs):
        """Obtain descendant revision numbers for a set of revision numbers.

        If ``nullrev`` is in the set, this is equivalent to ``revs()``.
        """

    def headrevs():
        """Obtain a list of revision numbers that are DAG heads.

        The list is sorted oldest to newest.

        TODO determine if sorting is required.
        """

    def heads(start=None, stop=None):
        """Obtain a list of nodes that are DAG heads, with control.

        The set of revisions examined can be limited by specifying
        ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
        iterable of nodes. DAG traversal starts at earlier revision
        ``start`` and iterates forward until any node in ``stop`` is
        encountered.
        """

    def children(node):
        """Obtain nodes that are children of a node.

        Returns a list of nodes.
        """

    def deltaparent(rev):
        """Return the revision that is a suitable parent to delta against."""
class ifiledata(interfaceutil.Interface):
    """Storage interface for data storage of a specific file.

    This complements ``ifileindex`` and provides an interface for accessing
    data for a tracked file.
    """
    def rawsize(rev):
        """The size of the fulltext data for a revision as stored."""

    def size(rev):
        """Obtain the fulltext size of file data.

        Any metadata is excluded from size measurements. Use ``rawsize()``
        if metadata size is important.
        """

    def checkhash(fulltext, node, p1=None, p2=None, rev=None):
        """Validate the stored hash of a given fulltext and node.

        Raises ``error.RevlogError`` is hash validation fails.
        """

    def revision(node, raw=False):
        """Obtain fulltext data for a node.

        By default, any storage transformations are applied before the data
        is returned. If ``raw`` is True, non-raw storage transformations
        are not applied.

        The fulltext data may contain a header containing metadata. Most
        consumers should use ``read()`` to obtain the actual file data.
        """

    def read(node):
        """Resolve file fulltext data.

        This is similar to ``revision()`` except any metadata in the data
        headers is stripped.
        """

    def renamed(node):
        """Obtain copy metadata for a node.

        Returns ``False`` if no copy metadata is stored or a 2-tuple of
        (path, node) from which this revision was copied.
        """

    def cmp(node, fulltext):
        """Compare fulltext to another revision.

        Returns True if the fulltext is different from what is stored.

        This takes copy metadata into account.

        TODO better document the copy metadata and censoring logic.
        """

    def revdiff(rev1, rev2):
        """Obtain a delta between two revision numbers.

        Operates on raw data in the store (``revision(node, raw=True)``).

        The returned data is the result of ``bdiff.bdiff`` on the raw
        revision data.
        """

    def emitrevisiondeltas(requests):
        """Produce ``irevisiondelta`` from ``irevisiondeltarequest``s.

        Given an iterable of objects conforming to the
        ``irevisiondeltarequest`` interface, emits objects conforming to the
        ``irevisiondelta`` interface.

        This method is a generator.

        ``irevisiondelta`` should be emitted in the same order of
        ``irevisiondeltarequest`` that was passed in.

        The emitted objects MUST conform by the results of
        ``irevisiondeltarequest``. Namely, they must respect any requests
        for building a delta from a specific ``basenode`` if defined.

        When sending deltas, implementations must take into account whether
        the client has the base delta before encoding a delta against that
        revision. A revision encountered previously in ``requests`` is
        always a suitable base revision. An example of a bad delta is a
        delta against a non-ancestor revision. Another example of a bad
        delta is a delta against a censored revision.
        """
647 644
648 645 class ifilemutation(interfaceutil.Interface):
649 646 """Storage interface for mutation events of a tracked file."""
650 647
651 648 def add(filedata, meta, transaction, linkrev, p1, p2):
652 649 """Add a new revision to the store.
653 650
654 651 Takes file data, dictionary of metadata, a transaction, linkrev,
655 652 and parent nodes.
656 653
657 654 Returns the node that was added.
658 655
659 656 May no-op if a revision matching the supplied data is already stored.
660 657 """
661 658
662 659 def addrevision(revisiondata, transaction, linkrev, p1, p2, node=None,
663 660 flags=0, cachedelta=None):
664 661 """Add a new revision to the store.
665 662
666 663 This is similar to ``add()`` except it operates at a lower level.
667 664
668 665 The data passed in already contains a metadata header, if any.
669 666
670 667 ``node`` and ``flags`` can be used to define the expected node and
671 668 the flags to use with storage.
672 669
673 670 ``add()`` is usually called when adding files from e.g. the working
674 671 directory. ``addrevision()`` is often called by ``add()`` and for
675 672 scenarios where revision data has already been computed, such as when
676 673 applying raw data from a peer repo.
677 674 """
678 675
679 676 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
680 677 """Process a series of deltas for storage.
681 678
682 679 ``deltas`` is an iterable of 7-tuples of
683 680 (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
684 681 to add.
685 682
686 683 The ``delta`` field contains ``mpatch`` data to apply to a base
687 684 revision, identified by ``deltabase``. The base node can be
688 685 ``nullid``, in which case the header from the delta can be ignored
689 686 and the delta used as the fulltext.
690 687
691 688 ``addrevisioncb`` should be called for each node as it is committed.
692 689
693 690 Returns a list of nodes that were processed. A node will be in the list
694 691 even if it existed in the store previously.
695 692 """
696 693
697 694 def getstrippoint(minlink):
698 695 """Find the minimum revision that must be stripped to strip a linkrev.
699 696
700 697 Returns a 2-tuple containing the minimum revision number and a set
701 698 of all revisions numbers that would be broken by this strip.
702 699
703 700 TODO this is highly revlog centric and should be abstracted into
704 701 a higher-level deletion API. ``repair.strip()`` relies on this.
705 702 """
706 703
707 704 def strip(minlink, transaction):
708 705 """Remove storage of items starting at a linkrev.
709 706
710 707 This uses ``getstrippoint()`` to determine the first node to remove.
711 708 Then it effectively truncates storage for all revisions after that.
712 709
713 710 TODO this is highly revlog centric and should be abstracted into a
714 711 higher-level deletion API.
715 712 """
716 713
717 714 class ifilestorage(ifileindex, ifiledata, ifilemutation):
718 715 """Complete storage interface for a single tracked file."""
719 716
720 717 version = interfaceutil.Attribute(
721 718 """Version number of storage.
722 719
723 720 TODO this feels revlog centric and could likely be removed.
724 721 """)
725 722
726 723 _generaldelta = interfaceutil.Attribute(
727 724 """Whether deltas can be against any parent revision.
728 725
729 726 TODO this is used by changegroup code and it could probably be
730 727 folded into another API.
731 728 """)
732 729
733 730 def files():
734 731 """Obtain paths that are backing storage for this file.
735 732
736 733 TODO this is used heavily by verify code and there should probably
737 734 be a better API for that.
738 735 """
739 736
740 737 def checksize():
741 738 """Obtain the expected sizes of backing files.
742 739
743 740 TODO this is used by verify and it should not be part of the interface.
744 741 """
745 742
746 743 class idirs(interfaceutil.Interface):
747 744 """Interface representing a collection of directories from paths.
748 745
749 746 This interface is essentially a derived data structure representing
750 747 directories from a collection of paths.
751 748 """
752 749
753 750 def addpath(path):
754 751 """Add a path to the collection.
755 752
756 753 All directories in the path will be added to the collection.
757 754 """
758 755
759 756 def delpath(path):
760 757 """Remove a path from the collection.
761 758
762 759 If the removal was the last path in a particular directory, the
763 760 directory is removed from the collection.
764 761 """
765 762
766 763 def __iter__():
767 764 """Iterate over the directories in this collection of paths."""
768 765
769 766 def __contains__(path):
770 767 """Whether a specific directory is in this collection."""
771 768
772 769 class imanifestdict(interfaceutil.Interface):
773 770 """Interface representing a manifest data structure.
774 771
775 772 A manifest is effectively a dict mapping paths to entries. Each entry
776 773 consists of a binary node and extra flags affecting that entry.
777 774 """
778 775
779 776 def __getitem__(path):
780 777 """Returns the binary node value for a path in the manifest.
781 778
782 779 Raises ``KeyError`` if the path does not exist in the manifest.
783 780
784 781 Equivalent to ``self.find(path)[0]``.
785 782 """
786 783
787 784 def find(path):
788 785 """Returns the entry for a path in the manifest.
789 786
790 787 Returns a 2-tuple of (node, flags).
791 788
792 789 Raises ``KeyError`` if the path does not exist in the manifest.
793 790 """
794 791
795 792 def __len__():
796 793 """Return the number of entries in the manifest."""
797 794
798 795 def __nonzero__():
799 796 """Returns True if the manifest has entries, False otherwise."""
800 797
801 798 __bool__ = __nonzero__
802 799
803 800 def __setitem__(path, node):
804 801 """Define the node value for a path in the manifest.
805 802
806 803 If the path is already in the manifest, its flags will be copied to
807 804 the new entry.
808 805 """
809 806
810 807 def __contains__(path):
811 808 """Whether a path exists in the manifest."""
812 809
813 810 def __delitem__(path):
814 811 """Remove a path from the manifest.
815 812
816 813 Raises ``KeyError`` if the path is not in the manifest.
817 814 """
818 815
819 816 def __iter__():
820 817 """Iterate over paths in the manifest."""
821 818
822 819 def iterkeys():
823 820 """Iterate over paths in the manifest."""
824 821
825 822 def keys():
826 823 """Obtain a list of paths in the manifest."""
827 824
828 825 def filesnotin(other, match=None):
829 826 """Obtain the set of paths in this manifest but not in another.
830 827
831 828 ``match`` is an optional matcher function to be applied to both
832 829 manifests.
833 830
834 831 Returns a set of paths.
835 832 """
836 833
837 834 def dirs():
838 835 """Returns an object implementing the ``idirs`` interface."""
839 836
840 837 def hasdir(dir):
841 838 """Returns a bool indicating if a directory is in this manifest."""
842 839
843 840 def matches(match):
844 841 """Generate a new manifest filtered through a matcher.
845 842
846 843 Returns an object conforming to the ``imanifestdict`` interface.
847 844 """
848 845
849 846 def walk(match):
850 847 """Generator of paths in manifest satisfying a matcher.
851 848
852 849 This is equivalent to ``self.matches(match).iterkeys()`` except a new
853 850 manifest object is not created.
854 851
855 852 If the matcher has explicit files listed and they don't exist in
856 853 the manifest, ``match.bad()`` is called for each missing file.
857 854 """
858 855
859 856 def diff(other, match=None, clean=False):
860 857 """Find differences between this manifest and another.
861 858
862 859 This manifest is compared to ``other``.
863 860
864 861 If ``match`` is provided, the two manifests are filtered against this
865 862 matcher and only entries satisfying the matcher are compared.
866 863
867 864 If ``clean`` is True, unchanged files are included in the returned
868 865 object.
869 866
870 867 Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
871 868 the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
872 869 represents the node and flags for this manifest and ``(node2, flag2)``
873 870 are the same for the other manifest.
874 871 """
875 872
876 873 def setflag(path, flag):
877 874 """Set the flag value for a given path.
878 875
879 876 Raises ``KeyError`` if the path is not already in the manifest.
880 877 """
881 878
882 879 def get(path, default=None):
883 880 """Obtain the node value for a path or a default value if missing."""
884 881
885 882 def flags(path, default=''):
886 883 """Return the flags value for a path or a default value if missing."""
887 884
888 885 def copy():
889 886 """Return a copy of this manifest."""
890 887
891 888 def items():
892 889 """Returns an iterable of (path, node) for items in this manifest."""
893 890
894 891 def iteritems():
895 892 """Identical to items()."""
896 893
897 894 def iterentries():
898 895 """Returns an iterable of (path, node, flags) for this manifest.
899 896
900 897 Similar to ``iteritems()`` except items are a 3-tuple and include
901 898 flags.
902 899 """
903 900
904 901 def text():
905 902 """Obtain the raw data representation for this manifest.
906 903
907 904 Result is used to create a manifest revision.
908 905 """
909 906
910 907 def fastdelta(base, changes):
911 908 """Obtain a delta between this manifest and another given changes.
912 909
913 910 ``base`` in the raw data representation for another manifest.
914 911
915 912 ``changes`` is an iterable of ``(path, to_delete)``.
916 913
917 914 Returns a 2-tuple containing ``bytearray(self.text())`` and the
918 915 delta between ``base`` and this manifest.
919 916 """
920 917
921 918 class imanifestrevisionbase(interfaceutil.Interface):
922 919 """Base interface representing a single revision of a manifest.
923 920
924 921 Should not be used as a primary interface: should always be inherited
925 922 as part of a larger interface.
926 923 """
927 924
928 925 def new():
929 926 """Obtain a new manifest instance.
930 927
931 928 Returns an object conforming to the ``imanifestrevisionwritable``
932 929 interface. The instance will be associated with the same
933 930 ``imanifestlog`` collection as this instance.
934 931 """
935 932
936 933 def copy():
937 934 """Obtain a copy of this manifest instance.
938 935
939 936 Returns an object conforming to the ``imanifestrevisionwritable``
940 937 interface. The instance will be associated with the same
941 938 ``imanifestlog`` collection as this instance.
942 939 """
943 940
944 941 def read():
945 942 """Obtain the parsed manifest data structure.
946 943
947 944 The returned object conforms to the ``imanifestdict`` interface.
948 945 """
949 946
950 947 class imanifestrevisionstored(imanifestrevisionbase):
951 948 """Interface representing a manifest revision committed to storage."""
952 949
953 950 def node():
954 951 """The binary node for this manifest."""
955 952
956 953 parents = interfaceutil.Attribute(
957 954 """List of binary nodes that are parents for this manifest revision."""
958 955 )
959 956
960 957 def readdelta(shallow=False):
961 958 """Obtain the manifest data structure representing changes from parent.
962 959
963 960 This manifest is compared to its 1st parent. A new manifest representing
964 961 those differences is constructed.
965 962
966 963 The returned object conforms to the ``imanifestdict`` interface.
967 964 """
968 965
969 966 def readfast(shallow=False):
970 967 """Calls either ``read()`` or ``readdelta()``.
971 968
972 969 The faster of the two options is called.
973 970 """
974 971
975 972 def find(key):
976 973 """Calls self.read().find(key)``.
977 974
978 975 Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
979 976 """
980 977
981 978 class imanifestrevisionwritable(imanifestrevisionbase):
982 979 """Interface representing a manifest revision that can be committed."""
983 980
984 981 def write(transaction, linkrev, p1node, p2node, added, removed):
985 982 """Add this revision to storage.
986 983
987 984 Takes a transaction object, the changeset revision number it will
988 985 be associated with, its parent nodes, and lists of added and
989 986 removed paths.
990 987
991 988 Returns the binary node of the created revision.
992 989 """
993 990
994 991 class imanifestlog(interfaceutil.Interface):
995 992 """Interface representing a collection of manifest snapshots."""
996 993
997 994 def __getitem__(node):
998 995 """Obtain a manifest instance for a given binary node.
999 996
1000 997 Equivalent to calling ``self.get('', node)``.
1001 998
1002 999 The returned object conforms to the ``imanifestrevisionstored``
1003 1000 interface.
1004 1001 """
1005 1002
1006 1003 def get(dir, node, verify=True):
1007 1004 """Retrieve the manifest instance for a given directory and binary node.
1008 1005
1009 1006 ``node`` always refers to the node of the root manifest (which will be
1010 1007 the only manifest if flat manifests are being used).
1011 1008
1012 1009 If ``dir`` is the empty string, the root manifest is returned. Otherwise
1013 1010 the manifest for the specified directory will be returned (requires
1014 1011 tree manifests).
1015 1012
1016 1013 If ``verify`` is True, ``LookupError`` is raised if the node is not
1017 1014 known.
1018 1015
1019 1016 The returned object conforms to the ``imanifestrevisionstored``
1020 1017 interface.
1021 1018 """
1022 1019
1023 1020 def clearcaches():
1024 1021 """Clear caches associated with this collection."""
1025 1022
1026 1023 def rev(node):
1027 1024 """Obtain the revision number for a binary node.
1028 1025
1029 1026 Raises ``error.LookupError`` if the node is not known.
1030 1027 """
1031 1028
1032 1029 def addgroup(deltas, linkmapper, transaction):
1033 1030 """Process a series of deltas for storage.
1034 1031
1035 1032 ``deltas`` is an iterable of 7-tuples of
1036 1033 (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
1037 1034 to add.
1038 1035
1039 1036 The ``delta`` field contains ``mpatch`` data to apply to a base
1040 1037 revision, identified by ``deltabase``. The base node can be
1041 1038 ``nullid``, in which case the header from the delta can be ignored
1042 1039 and the delta used as the fulltext.
1043 1040
1044 1041 Returns a list of nodes that were processed. A node will be in the list
1045 1042 even if it existed in the store previously.
1046 1043 """
1047 1044
1048 1045 class completelocalrepository(interfaceutil.Interface):
1049 1046 """Monolithic interface for local repositories.
1050 1047
1051 1048 This currently captures the reality of things - not how things should be.
1052 1049 """
1053 1050
1054 1051 supportedformats = interfaceutil.Attribute(
1055 1052 """Set of requirements that apply to stream clone.
1056 1053
1057 1054 This is actually a class attribute and is shared among all instances.
1058 1055 """)
1059 1056
1060 1057 openerreqs = interfaceutil.Attribute(
1061 1058 """Set of requirements that are passed to the opener.
1062 1059
1063 1060 This is actually a class attribute and is shared among all instances.
1064 1061 """)
1065 1062
1066 1063 supported = interfaceutil.Attribute(
1067 1064 """Set of requirements that this repo is capable of opening.""")
1068 1065
1069 1066 requirements = interfaceutil.Attribute(
1070 1067 """Set of requirements this repo uses.""")
1071 1068
1072 1069 filtername = interfaceutil.Attribute(
1073 1070 """Name of the repoview that is active on this repo.""")
1074 1071
1075 1072 wvfs = interfaceutil.Attribute(
1076 1073 """VFS used to access the working directory.""")
1077 1074
1078 1075 vfs = interfaceutil.Attribute(
1079 1076 """VFS rooted at the .hg directory.
1080 1077
1081 1078 Used to access repository data not in the store.
1082 1079 """)
1083 1080
1084 1081 svfs = interfaceutil.Attribute(
1085 1082 """VFS rooted at the store.
1086 1083
1087 1084 Used to access repository data in the store. Typically .hg/store.
1088 1085 But can point elsewhere if the store is shared.
1089 1086 """)
1090 1087
1091 1088 root = interfaceutil.Attribute(
1092 1089 """Path to the root of the working directory.""")
1093 1090
1094 1091 path = interfaceutil.Attribute(
1095 1092 """Path to the .hg directory.""")
1096 1093
1097 1094 origroot = interfaceutil.Attribute(
1098 1095 """The filesystem path that was used to construct the repo.""")
1099 1096
1100 1097 auditor = interfaceutil.Attribute(
1101 1098 """A pathauditor for the working directory.
1102 1099
1103 1100 This checks if a path refers to a nested repository.
1104 1101
1105 1102 Operates on the filesystem.
1106 1103 """)
1107 1104
1108 1105 nofsauditor = interfaceutil.Attribute(
1109 1106 """A pathauditor for the working directory.
1110 1107
1111 1108 This is like ``auditor`` except it doesn't do filesystem checks.
1112 1109 """)
1113 1110
1114 1111 baseui = interfaceutil.Attribute(
1115 1112 """Original ui instance passed into constructor.""")
1116 1113
1117 1114 ui = interfaceutil.Attribute(
1118 1115 """Main ui instance for this instance.""")
1119 1116
1120 1117 sharedpath = interfaceutil.Attribute(
1121 1118 """Path to the .hg directory of the repo this repo was shared from.""")
1122 1119
1123 1120 store = interfaceutil.Attribute(
1124 1121 """A store instance.""")
1125 1122
1126 1123 spath = interfaceutil.Attribute(
1127 1124 """Path to the store.""")
1128 1125
1129 1126 sjoin = interfaceutil.Attribute(
1130 1127 """Alias to self.store.join.""")
1131 1128
1132 1129 cachevfs = interfaceutil.Attribute(
1133 1130 """A VFS used to access the cache directory.
1134 1131
1135 1132 Typically .hg/cache.
1136 1133 """)
1137 1134
1138 1135 filteredrevcache = interfaceutil.Attribute(
1139 1136 """Holds sets of revisions to be filtered.""")
1140 1137
1141 1138 names = interfaceutil.Attribute(
1142 1139 """A ``namespaces`` instance.""")
1143 1140
1144 1141 def close():
1145 1142 """Close the handle on this repository."""
1146 1143
1147 1144 def peer():
1148 1145 """Obtain an object conforming to the ``peer`` interface."""
1149 1146
1150 1147 def unfiltered():
1151 1148 """Obtain an unfiltered/raw view of this repo."""
1152 1149
1153 1150 def filtered(name, visibilityexceptions=None):
1154 1151 """Obtain a named view of this repository."""
1155 1152
1156 1153 obsstore = interfaceutil.Attribute(
1157 1154 """A store of obsolescence data.""")
1158 1155
1159 1156 changelog = interfaceutil.Attribute(
1160 1157 """A handle on the changelog revlog.""")
1161 1158
1162 1159 manifestlog = interfaceutil.Attribute(
1163 1160 """An instance conforming to the ``imanifestlog`` interface.
1164 1161
1165 1162 Provides access to manifests for the repository.
1166 1163 """)
1167 1164
1168 1165 dirstate = interfaceutil.Attribute(
1169 1166 """Working directory state.""")
1170 1167
1171 1168 narrowpats = interfaceutil.Attribute(
1172 1169 """Matcher patterns for this repository's narrowspec.""")
1173 1170
1174 1171 def narrowmatch():
1175 1172 """Obtain a matcher for the narrowspec."""
1176 1173
1177 1174 def setnarrowpats(newincludes, newexcludes):
1178 1175 """Define the narrowspec for this repository."""
1179 1176
1180 1177 def __getitem__(changeid):
1181 1178 """Try to resolve a changectx."""
1182 1179
1183 1180 def __contains__(changeid):
1184 1181 """Whether a changeset exists."""
1185 1182
1186 1183 def __nonzero__():
1187 1184 """Always returns True."""
1188 1185 return True
1189 1186
1190 1187 __bool__ = __nonzero__
1191 1188
1192 1189 def __len__():
1193 1190 """Returns the number of changesets in the repo."""
1194 1191
1195 1192 def __iter__():
1196 1193 """Iterate over revisions in the changelog."""
1197 1194
1198 1195 def revs(expr, *args):
1199 1196 """Evaluate a revset.
1200 1197
1201 1198 Emits revisions.
1202 1199 """
1203 1200
1204 1201 def set(expr, *args):
1205 1202 """Evaluate a revset.
1206 1203
1207 1204 Emits changectx instances.
1208 1205 """
1209 1206
1210 1207 def anyrevs(specs, user=False, localalias=None):
1211 1208 """Find revisions matching one of the given revsets."""
1212 1209
1213 1210 def url():
1214 1211 """Returns a string representing the location of this repo."""
1215 1212
1216 1213 def hook(name, throw=False, **args):
1217 1214 """Call a hook."""
1218 1215
1219 1216 def tags():
1220 1217 """Return a mapping of tag to node."""
1221 1218
1222 1219 def tagtype(tagname):
1223 1220 """Return the type of a given tag."""
1224 1221
1225 1222 def tagslist():
1226 1223 """Return a list of tags ordered by revision."""
1227 1224
1228 1225 def nodetags(node):
1229 1226 """Return the tags associated with a node."""
1230 1227
1231 1228 def nodebookmarks(node):
1232 1229 """Return the list of bookmarks pointing to the specified node."""
1233 1230
1234 1231 def branchmap():
1235 1232 """Return a mapping of branch to heads in that branch."""
1236 1233
1237 1234 def revbranchcache():
1238 1235 pass
1239 1236
1240 1237 def branchtip(branchtip, ignoremissing=False):
1241 1238 """Return the tip node for a given branch."""
1242 1239
1243 1240 def lookup(key):
1244 1241 """Resolve the node for a revision."""
1245 1242
1246 1243 def lookupbranch(key):
1247 1244 """Look up the branch name of the given revision or branch name."""
1248 1245
1249 1246 def known(nodes):
1250 1247 """Determine whether a series of nodes is known.
1251 1248
1252 1249 Returns a list of bools.
1253 1250 """
1254 1251
1255 1252 def local():
1256 1253 """Whether the repository is local."""
1257 1254 return True
1258 1255
1259 1256 def publishing():
1260 1257 """Whether the repository is a publishing repository."""
1261 1258
1262 1259 def cancopy():
1263 1260 pass
1264 1261
1265 1262 def shared():
1266 1263 """The type of shared repository or None."""
1267 1264
1268 1265 def wjoin(f, *insidef):
1269 1266 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
1270 1267
1271 1268 def file(f):
1272 1269 """Obtain a filelog for a tracked path.
1273 1270
1274 1271 The returned type conforms to the ``ifilestorage`` interface.
1275 1272 """
1276 1273
1277 1274 def setparents(p1, p2):
1278 1275 """Set the parent nodes of the working directory."""
1279 1276
1280 1277 def filectx(path, changeid=None, fileid=None):
1281 1278 """Obtain a filectx for the given file revision."""
1282 1279
1283 1280 def getcwd():
1284 1281 """Obtain the current working directory from the dirstate."""
1285 1282
1286 1283 def pathto(f, cwd=None):
1287 1284 """Obtain the relative path to a file."""
1288 1285
1289 1286 def adddatafilter(name, fltr):
1290 1287 pass
1291 1288
1292 1289 def wread(filename):
1293 1290 """Read a file from wvfs, using data filters."""
1294 1291
1295 1292 def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
1296 1293 """Write data to a file in the wvfs, using data filters."""
1297 1294
1298 1295 def wwritedata(filename, data):
1299 1296 """Resolve data for writing to the wvfs, using data filters."""
1300 1297
1301 1298 def currenttransaction():
1302 1299 """Obtain the current transaction instance or None."""
1303 1300
1304 1301 def transaction(desc, report=None):
1305 1302 """Open a new transaction to write to the repository."""
1306 1303
1307 1304 def undofiles():
1308 1305 """Returns a list of (vfs, path) for files to undo transactions."""
1309 1306
1310 1307 def recover():
1311 1308 """Roll back an interrupted transaction."""
1312 1309
1313 1310 def rollback(dryrun=False, force=False):
1314 1311 """Undo the last transaction.
1315 1312
1316 1313 DANGEROUS.
1317 1314 """
1318 1315
1319 1316 def updatecaches(tr=None, full=False):
1320 1317 """Warm repo caches."""
1321 1318
1322 1319 def invalidatecaches():
1323 1320 """Invalidate cached data due to the repository mutating."""
1324 1321
1325 1322 def invalidatevolatilesets():
1326 1323 pass
1327 1324
1328 1325 def invalidatedirstate():
1329 1326 """Invalidate the dirstate."""
1330 1327
1331 1328 def invalidate(clearfilecache=False):
1332 1329 pass
1333 1330
1334 1331 def invalidateall():
1335 1332 pass
1336 1333
1337 1334 def lock(wait=True):
1338 1335 """Lock the repository store and return a lock instance."""
1339 1336
1340 1337 def wlock(wait=True):
1341 1338 """Lock the non-store parts of the repository."""
1342 1339
1343 1340 def currentwlock():
1344 1341 """Return the wlock if it's held or None."""
1345 1342
1346 1343 def checkcommitpatterns(wctx, vdirs, match, status, fail):
1347 1344 pass
1348 1345
1349 1346 def commit(text='', user=None, date=None, match=None, force=False,
1350 1347 editor=False, extra=None):
1351 1348 """Add a new revision to the repository."""
1352 1349
1353 1350 def commitctx(ctx, error=False):
1354 1351 """Commit a commitctx instance to the repository."""
1355 1352
1356 1353 def destroying():
1357 1354 """Inform the repository that nodes are about to be destroyed."""
1358 1355
1359 1356 def destroyed():
1360 1357 """Inform the repository that nodes have been destroyed."""
1361 1358
1362 1359 def status(node1='.', node2=None, match=None, ignored=False,
1363 1360 clean=False, unknown=False, listsubrepos=False):
1364 1361 """Convenience method to call repo[x].status()."""
1365 1362
1366 1363 def addpostdsstatus(ps):
1367 1364 pass
1368 1365
1369 1366 def postdsstatus():
1370 1367 pass
1371 1368
1372 1369 def clearpostdsstatus():
1373 1370 pass
1374 1371
1375 1372 def heads(start=None):
1376 1373 """Obtain list of nodes that are DAG heads."""
1377 1374
1378 1375 def branchheads(branch=None, start=None, closed=False):
1379 1376 pass
1380 1377
1381 1378 def branches(nodes):
1382 1379 pass
1383 1380
1384 1381 def between(pairs):
1385 1382 pass
1386 1383
1387 1384 def checkpush(pushop):
1388 1385 pass
1389 1386
1390 1387 prepushoutgoinghooks = interfaceutil.Attribute(
1391 1388 """util.hooks instance.""")
1392 1389
1393 1390 def pushkey(namespace, key, old, new):
1394 1391 pass
1395 1392
1396 1393 def listkeys(namespace):
1397 1394 pass
1398 1395
1399 1396 def debugwireargs(one, two, three=None, four=None, five=None):
1400 1397 pass
1401 1398
1402 1399 def savecommitmessage(text):
1403 1400 pass
@@ -1,750 +1,750
1 1 # simplestorerepo.py - Extension that swaps in alternate repository storage.
2 2 #
3 3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 # To use this with the test suite:
9 9 #
10 10 # $ HGREPOFEATURES="simplestore" ./run-tests.py \
11 11 # --extra-config-opt extensions.simplestore=`pwd`/simplestorerepo.py
12 12
13 13 from __future__ import absolute_import
14 14
15 15 import stat
16 16
17 17 from mercurial.i18n import _
18 18 from mercurial.node import (
19 19 bin,
20 20 hex,
21 21 nullid,
22 22 nullrev,
23 23 )
24 24 from mercurial.thirdparty import (
25 25 attr,
26 26 cbor,
27 27 )
28 28 from mercurial import (
29 29 ancestor,
30 30 bundlerepo,
31 31 error,
32 32 extensions,
33 33 localrepo,
34 34 mdiff,
35 35 pycompat,
36 36 repository,
37 37 revlog,
38 38 store,
39 39 verify,
40 40 )
41 41 from mercurial.utils import (
42 42 interfaceutil,
43 43 )
44 44
45 45 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
46 46 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
47 47 # be specifying the version(s) of Mercurial they are tested with, or
48 48 # leave the attribute unspecified.
49 49 testedwith = 'ships-with-hg-core'
50 50
51 51 REQUIREMENT = 'testonly-simplestore'
52 52
53 53 def validatenode(node):
54 54 if isinstance(node, int):
55 55 raise ValueError('expected node; got int')
56 56
57 57 if len(node) != 20:
58 58 raise ValueError('expected 20 byte node')
59 59
60 60 def validaterev(rev):
61 61 if not isinstance(rev, int):
62 62 raise ValueError('expected int')
63 63
64 64 @interfaceutil.implementer(repository.irevisiondelta)
65 65 @attr.s(slots=True, frozen=True)
66 66 class simplestorerevisiondelta(object):
67 67 node = attr.ib()
68 68 p1node = attr.ib()
69 69 p2node = attr.ib()
70 70 basenode = attr.ib()
71 71 linknode = attr.ib()
72 72 flags = attr.ib()
73 73 baserevisionsize = attr.ib()
74 74 revision = attr.ib()
75 75 delta = attr.ib()
76 76
77 77 @interfaceutil.implementer(repository.ifilestorage)
78 78 class filestorage(object):
79 79 """Implements storage for a tracked path.
80 80
81 81 Data is stored in the VFS in a directory corresponding to the tracked
82 82 path.
83 83
84 84 Index data is stored in an ``index`` file using CBOR.
85 85
86 86 Fulltext data is stored in files having names of the node.
87 87 """
88 88
89 89 def __init__(self, svfs, path):
90 90 self._svfs = svfs
91 91 self._path = path
92 92
93 93 self._storepath = b'/'.join([b'data', path])
94 94 self._indexpath = b'/'.join([self._storepath, b'index'])
95 95
96 96 indexdata = self._svfs.tryread(self._indexpath)
97 97 if indexdata:
98 98 indexdata = cbor.loads(indexdata)
99 99
100 100 self._indexdata = indexdata or []
101 101 self._indexbynode = {}
102 102 self._indexbyrev = {}
103 103 self.index = []
104 104 self._refreshindex()
105 105
106 106 # This is used by changegroup code :/
107 107 self._generaldelta = True
108 108
109 109 self.version = 1
110 110
111 111 def _refreshindex(self):
112 112 self._indexbynode.clear()
113 113 self._indexbyrev.clear()
114 114 self.index = []
115 115
116 116 for i, entry in enumerate(self._indexdata):
117 117 self._indexbynode[entry[b'node']] = entry
118 118 self._indexbyrev[i] = entry
119 119
120 120 self._indexbynode[nullid] = {
121 121 b'node': nullid,
122 122 b'p1': nullid,
123 123 b'p2': nullid,
124 124 b'linkrev': nullrev,
125 125 b'flags': 0,
126 126 }
127 127
128 128 self._indexbyrev[nullrev] = {
129 129 b'node': nullid,
130 130 b'p1': nullid,
131 131 b'p2': nullid,
132 132 b'linkrev': nullrev,
133 133 b'flags': 0,
134 134 }
135 135
136 136 for i, entry in enumerate(self._indexdata):
137 137 p1rev, p2rev = self.parentrevs(self.rev(entry[b'node']))
138 138
139 139 # start, length, rawsize, chainbase, linkrev, p1, p2, node
140 140 self.index.append((0, 0, 0, -1, entry[b'linkrev'], p1rev, p2rev,
141 141 entry[b'node']))
142 142
143 143 self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))
144 144
145 145 def __len__(self):
146 146 return len(self._indexdata)
147 147
148 148 def __iter__(self):
149 149 return iter(range(len(self)))
150 150
151 151 def revs(self, start=0, stop=None):
152 152 step = 1
153 153 if stop is not None:
154 154 if start > stop:
155 155 step = -1
156 156
157 157 stop += step
158 158 else:
159 159 stop = len(self)
160 160
161 161 return range(start, stop, step)
162 162
163 163 def parents(self, node):
164 164 validatenode(node)
165 165
166 166 if node not in self._indexbynode:
167 167 raise KeyError('unknown node')
168 168
169 169 entry = self._indexbynode[node]
170 170
171 171 return entry[b'p1'], entry[b'p2']
172 172
173 173 def parentrevs(self, rev):
174 174 p1, p2 = self.parents(self._indexbyrev[rev][b'node'])
175 175 return self.rev(p1), self.rev(p2)
176 176
177 177 def rev(self, node):
178 178 validatenode(node)
179 179
180 180 try:
181 181 self._indexbynode[node]
182 182 except KeyError:
183 183 raise error.LookupError(node, self._indexpath, _('no node'))
184 184
185 185 for rev, entry in self._indexbyrev.items():
186 186 if entry[b'node'] == node:
187 187 return rev
188 188
189 189 raise error.ProgrammingError('this should not occur')
190 190
191 191 def node(self, rev):
192 192 validaterev(rev)
193 193
194 194 return self._indexbyrev[rev][b'node']
195 195
def lookup(self, node):
    """Resolve a user-supplied revision identifier to a binary node.

    Accepts, in order of precedence: an integer revision number, a
    20-byte binary node, a (possibly negative) revision number given
    as a string, or a 40-character hex node. Raises
    ``error.LookupError`` when nothing matches.
    """
    if isinstance(node, int):
        return self.node(node)

    if len(node) == 20:
        # rev() validates existence and raises LookupError otherwise.
        self.rev(node)
        return node

    try:
        rev = int(node)
        # Reject strings like '01' or '1x' that int() would accept
        # but that are not canonical revision numbers.
        if '%d' % rev != node:
            raise ValueError

        if rev < 0:
            rev = len(self) + rev
        if rev < 0 or rev >= len(self):
            raise ValueError

        return self.node(rev)
    except (ValueError, OverflowError):
        pass

    if len(node) == 40:
        try:
            rawnode = bin(node)
            self.rev(rawnode)
            return rawnode
        except TypeError:
            # Not valid hex; fall through to the failure case.
            pass

    raise error.LookupError(node, self._path, _('invalid lookup input'))
227 227
def linkrev(self, rev):
    """Return the changelog revision this file revision is linked to."""
    validaterev(rev)

    return self._indexbyrev[rev][b'linkrev']
232 232
def flags(self, rev):
    """Return the storage flags recorded for ``rev``."""
    validaterev(rev)

    return self._indexbyrev[rev][b'flags']
237 237
def deltaparent(self, rev):
    """Return the revision used as the delta base: always p1 of ``rev``."""
    validaterev(rev)

    p1node, _p2node = self.parents(self.node(rev)), None
    return self.rev(p1node[0][0]) if False else self.rev(
        self.parents(self.node(rev))[0])
243 243
def _candelta(self, baserev, rev):
    """Report whether storing ``rev`` as a delta against ``baserev`` is safe.

    Deltas are disallowed when either revision carries a flag that can
    change the raw text (e.g. censorship), since the delta would be
    computed against unstable content.
    """
    validaterev(baserev)
    validaterev(rev)

    changing = revlog.REVIDX_RAWTEXT_CHANGING_FLAGS
    combined = self.flags(baserev) | self.flags(rev)
    return not (combined & changing)
253 253
def rawsize(self, rev):
    """Return the length of the raw (un-transformed) text of ``rev``."""
    validaterev(rev)
    node = self.node(rev)
    return len(self.revision(node, raw=True))
258 258
def _processflags(self, text, flags, operation, raw=False):
    """Run registered flag processors over ``text``.

    ``operation`` is ``'read'`` or ``'write'``; ``raw=True`` applies
    only the raw transform (used for hash/size checks). Returns a
    ``(text, validatehash)`` tuple where ``validatehash`` tells the
    caller whether the result should be hash-verified.

    Raises ``error.RevlogError`` for unknown flags or for known flags
    that have no registered processor.
    """
    if flags == 0:
        return text, True

    if flags & ~revlog.REVIDX_KNOWN_FLAGS:
        raise error.RevlogError(_("incompatible revision flag '%#x'") %
                                (flags & ~revlog.REVIDX_KNOWN_FLAGS))

    validatehash = True
    # Depending on the operation (read or write), the order might be
    # reversed due to non-commutative transforms.
    orderedflags = revlog.REVIDX_FLAGS_ORDER
    if operation == 'write':
        orderedflags = reversed(orderedflags)

    for flag in orderedflags:
        # If a flagprocessor has been registered for a known flag, apply the
        # related operation transform and update result tuple.
        if flag & flags:
            vhash = True

            if flag not in revlog._flagprocessors:
                message = _("missing processor for flag '%#x'") % (flag)
                # Raise error.RevlogError for consistency with the
                # unknown-flag branch above (previously this raised via
                # the revlog module's alias of the same exception).
                raise error.RevlogError(message)

            processor = revlog._flagprocessors[flag]
            if processor is not None:
                readtransform, writetransform, rawtransform = processor

                if raw:
                    vhash = rawtransform(self, text)
                elif operation == 'read':
                    text, vhash = readtransform(self, text)
                else: # write operation
                    text, vhash = writetransform(self, text)
            validatehash = validatehash and vhash

    return text, validatehash
297 297
def checkhash(self, text, node, p1=None, p2=None, rev=None):
    """Verify that ``text`` hashes to ``node``.

    Parents are looked up from the index when neither is supplied.
    Raises ``error.RevlogError`` on an integrity failure.
    """
    if p1 is None and p2 is None:
        p1, p2 = self.parents(node)
    if node != revlog.hash(text, p1, p2):
        raise error.RevlogError(_("integrity check failed on %s") %
                                self._path)
304 304
def revision(self, node, raw=False):
    """Return the fulltext of ``node``.

    With ``raw=True`` the stored rawtext is returned without applying
    read transforms. The null node yields the empty string.
    """
    validatenode(node)

    if node == nullid:
        return b''

    rev = self.rev(node)
    flags = self.flags(rev)

    # Each revision's rawtext lives in its own file keyed by hex node.
    path = b'/'.join([self._storepath, hex(node)])
    rawtext = self._svfs.read(path)

    text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
    if validatehash:
        self.checkhash(text, node, rev=rev)

    return text
322 322
def read(self, node):
    """Return file data for ``node`` with copy metadata stripped.

    Copy metadata, when present, is framed by a leading ``\\1\\n`` pair
    and terminated by another ``\\1\\n``.
    """
    validatenode(node)

    revision = self.revision(node)

    if not revision.startswith(b'\1\n'):
        return revision

    # Skip past the closing metadata delimiter.
    start = revision.index(b'\1\n', 2)
    return revision[start + 2:]
333 333
def renamed(self, node):
    """Return ``(copysource, copynode)`` if ``node`` is a copy, else False.

    Copy metadata can only exist on revisions whose first parent is
    null, so anything else short-circuits to False.
    """
    validatenode(node)

    if self.parents(node)[0] != nullid:
        return False

    fulltext = self.revision(node)
    m = revlog.parsemeta(fulltext)[0]

    if m and 'copy' in m:
        return m['copy'], bin(m['copyrev'])

    return False
347 347
def cmp(self, node, text):
    """Return True when ``text`` differs from the stored content of ``node``.

    Compares by hash first (cheap); censored revisions compare equal
    only to the empty string; copies fall back to a full content
    comparison against the metadata-stripped text.
    """
    validatenode(node)

    t = text

    # Escape a leading metadata marker the same way add() would before
    # hashing, so the comparison matches what was stored.
    if text.startswith(b'\1\n'):
        t = b'\1\n\1\n' + text

    p1, p2 = self.parents(node)

    if revlog.hash(t, p1, p2) == node:
        return False

    if self.iscensored(self.rev(node)):
        return text != b''

    if self.renamed(node):
        t2 = self.read(node)
        return t2 != text

    return True
369 369
def size(self, rev):
    """Return the length of the file data for ``rev``.

    Copies report the metadata-stripped length; censored revisions
    report 0.
    """
    validaterev(rev)

    node = self._indexbyrev[rev][b'node']

    if self.renamed(node):
        return len(self.read(node))

    if self.iscensored(rev):
        return 0

    return len(self.revision(node))
382 382
def iscensored(self, rev):
    """Return a truthy value when ``rev`` carries the censored flag."""
    validaterev(rev)

    censorbit = revlog.REVIDX_ISCENSORED
    return self.flags(rev) & censorbit
387 387
def commonancestorsheads(self, a, b):
    """Return the heads of the common ancestors of nodes ``a`` and ``b``."""
    validatenode(a)
    validatenode(b)

    # The ancestor algorithm works on revision numbers.
    a = self.rev(a)
    b = self.rev(b)

    ancestors = ancestor.commonancestorsheads(self.parentrevs, a, b)
    return pycompat.maplist(self.node, ancestors)
397 397
def descendants(self, revs):
    """Yield revision numbers of all descendants of ``revs``."""
    # This is a copy of revlog.descendants()
    first = min(revs)
    if first == nullrev:
        # Everything descends from the null revision.
        for i in self:
            yield i
        return

    seen = set(revs)
    for i in self.revs(start=first + 1):
        for x in self.parentrevs(i):
            if x != nullrev and x in seen:
                seen.add(i)
                yield i
                break
413 413
# Required by verify.
def files(self):
    """Return store-relative paths of all files held by this store."""
    entries = self._svfs.listdir(self._storepath)

    # Strip out undo.backup.* files created as part of transaction
    # recording.
    entries = [f for f in entries if not f.startswith('undo.backup.')]

    return [b'/'.join((self._storepath, f)) for f in entries]
423 423
# Required by verify.
def checksize(self):
    """This store tracks no sizes, so report zero discrepancies."""
    return (0, 0)
427 427
def add(self, text, meta, transaction, linkrev, p1, p2):
    """Add a file revision, packing copy metadata into the text."""
    # Metadata (or a text that merely looks like it starts with the
    # metadata marker) must be framed before storage.
    if meta or text.startswith(b'\1\n'):
        text = revlog.packmeta(meta, text)

    return self.addrevision(text, transaction, linkrev, p1, p2)
433 433
def addrevision(self, text, transaction, linkrev, p1, p2, node=None,
                flags=revlog.REVIDX_DEFAULT_FLAGS, cachedelta=None):
    """Add a revision with the given parents; return its node.

    ``cachedelta`` is accepted for interface compatibility but unused
    by this store. Adding an already-known node is a no-op.
    """
    validatenode(p1)
    validatenode(p2)

    # Flagged content needs the node computed from the pre-transform
    # text, before write transforms rewrite it.
    if flags:
        node = node or revlog.hash(text, p1, p2)

    rawtext, validatehash = self._processflags(text, flags, 'write')

    node = node or revlog.hash(text, p1, p2)

    if node in self._indexbynode:
        return node

    if validatehash:
        self.checkhash(rawtext, node, p1=p1, p2=p2)

    return self._addrawrevision(node, rawtext, transaction, linkrev, p1, p2,
                                flags)
454 454
def _addrawrevision(self, node, rawtext, transaction, link, p1, p2, flags):
    """Write ``rawtext`` to its per-node file and record an index entry."""
    # Register the index file for rollback before mutating anything.
    transaction.addbackup(self._indexpath)

    path = b'/'.join([self._storepath, hex(node)])

    self._svfs.write(path, rawtext)

    self._indexdata.append({
        b'node': node,
        b'p1': p1,
        b'p2': p2,
        b'linkrev': link,
        b'flags': flags,
    })

    self._reflectindexupdate()

    return node
473 473
def _reflectindexupdate(self):
    """Rebuild derived index structures and persist the index as CBOR."""
    self._refreshindex()
    self._svfs.write(self._indexpath, cbor.dumps(self._indexdata))
477 477
def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
    """Apply a group of incoming deltas; return the list of their nodes.

    Known nodes are skipped (but still reported). ``addrevisioncb``,
    when given, is invoked with ``(self, node)`` for every entry.
    """
    nodes = []

    transaction.addbackup(self._indexpath)

    for node, p1, p2, linknode, deltabase, delta, flags in deltas:
        linkrev = linkmapper(linknode)
        flags = flags or revlog.REVIDX_DEFAULT_FLAGS

        nodes.append(node)

        if node in self._indexbynode:
            continue

        # Need to resolve the fulltext from the delta base.
        if deltabase == nullid:
            text = mdiff.patch(b'', delta)
        else:
            text = mdiff.patch(self.revision(deltabase), delta)

        self._addrawrevision(node, text, transaction, linkrev, p1, p2,
                             flags)

        if addrevisioncb:
            addrevisioncb(self, node)

    return nodes
505 505
def revdiff(self, rev1, rev2):
    """Return a binary delta turning ``rev1``'s rawtext into ``rev2``'s."""
    validaterev(rev1)
    validaterev(rev2)

    node1 = self.node(rev1)
    node2 = self.node(rev2)

    return mdiff.textdiff(self.revision(node1, raw=True),
                          self.revision(node2, raw=True))
515 515
def emitrevisiondeltas(self, requests):
    """Yield ``simplestorerevisiondelta`` objects for ``requests``.

    The delta base comes from the request when specified; otherwise p1
    is used. Fulltexts are emitted instead of deltas for null bases
    and for censored revisions.
    """
    for request in requests:
        node = request.node
        rev = self.rev(node)

        if request.basenode == nullid:
            baserev = nullrev
        elif request.basenode is not None:
            baserev = self.rev(request.basenode)
        else:
            # This is a test extension and we can do simple things
            # for choosing a delta parent.
            baserev = self.deltaparent(rev)

        if baserev != nullrev and not self._candelta(baserev, rev):
            baserev = nullrev

        revision = None
        delta = None
        baserevisionsize = None

        # NOTE(review): iscensored(baserev) is also reached when
        # baserev == nullrev -- confirm validaterev accepts nullrev.
        if self.iscensored(baserev) or self.iscensored(rev):
            try:
                revision = self.revision(node, raw=True)
            except error.CensoredNodeError as e:
                # Ship the tombstone text in place of the real content.
                revision = e.tombstone

            if baserev != nullrev:
                baserevisionsize = self.rawsize(baserev)

        elif baserev == nullrev:
            revision = self.revision(node, raw=True)
        else:
            delta = self.revdiff(baserev, rev)

        extraflags = revlog.REVIDX_ELLIPSIS if request.ellipsis else 0

        yield simplestorerevisiondelta(
            node=node,
            p1node=request.p1node,
            p2node=request.p2node,
            linknode=request.linknode,
            basenode=self.node(baserev),
            flags=self.flags(rev) | extraflags,
            baserevisionsize=baserevisionsize,
            revision=revision,
            delta=delta)
563 563
def headrevs(self):
    """Return the sorted revision numbers that have no children."""
    # Assume all revisions are heads by default.
    revishead = {rev: True for rev in self._indexbyrev}

    for rev, entry in self._indexbyrev.items():
        # Unset head flag for all seen parents.
        revishead[self.rev(entry[b'p1'])] = False
        revishead[self.rev(entry[b'p2'])] = False

    return [rev for rev, ishead in sorted(revishead.items())
            if ishead]
575 575
def heads(self, start=None, stop=None):
    """Return head nodes, optionally restricted to descendants of ``start``.

    ``stop`` is an iterable of nodes that blocks further traversal.
    """
    # This is copied from revlog.py.
    if start is None and stop is None:
        if not len(self):
            return [nullid]
        return [self.node(r) for r in self.headrevs()]

    if start is None:
        start = nullid
    if stop is None:
        stop = []
    stoprevs = set([self.rev(n) for n in stop])
    startrev = self.rev(start)
    reachable = {startrev}
    heads = {startrev}

    parentrevs = self.parentrevs
    for r in self.revs(start=startrev + 1):
        for p in parentrevs(r):
            if p in reachable:
                if r not in stoprevs:
                    reachable.add(r)
                    heads.add(r)
                # A revision with a reachable child is no longer a head.
                if p in heads and p not in stoprevs:
                    heads.remove(p)

    return [self.node(r) for r in heads]
603 603
def children(self, node):
    """Return the nodes of all direct children of ``node``."""
    validatenode(node)

    # This is a copy of revlog.children().
    c = []
    p = self.rev(node)
    for r in self.revs(start=p + 1):
        prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
        if prevs:
            for pr in prevs:
                if pr == p:
                    c.append(self.node(r))
        # Revisions with only null parents are children of the null rev.
        elif p == nullrev:
            c.append(self.node(r))
    return c
619 619
def getstrippoint(self, minlink):
    """Return ``(strippoint, brokenrevs)`` for stripping at ``minlink``.

    ``strippoint`` is the smallest revision that must be removed so no
    remaining revision links to a changelog rev >= ``minlink``;
    ``brokenrevs`` are lower revisions that nonetheless link too high.
    """

    # This is largely a copy of revlog.getstrippoint().
    brokenrevs = set()
    strippoint = len(self)

    heads = {}
    futurelargelinkrevs = set()
    for head in self.headrevs():
        headlinkrev = self.linkrev(head)
        heads[head] = headlinkrev
        if headlinkrev >= minlink:
            futurelargelinkrevs.add(headlinkrev)

    # This algorithm involves walking down the rev graph, starting at the
    # heads. Since the revs are topologically sorted according to linkrev,
    # once all head linkrevs are below the minlink, we know there are
    # no more revs that could have a linkrev greater than minlink.
    # So we can stop walking.
    while futurelargelinkrevs:
        strippoint -= 1
        linkrev = heads.pop(strippoint)

        if linkrev < minlink:
            brokenrevs.add(strippoint)
        else:
            futurelargelinkrevs.remove(linkrev)

        for p in self.parentrevs(strippoint):
            if p != nullrev:
                plinkrev = self.linkrev(p)
                heads[p] = plinkrev
                if plinkrev >= minlink:
                    futurelargelinkrevs.add(plinkrev)

    return strippoint, brokenrevs
656 656
def strip(self, minlink, transaction):
    """Remove revisions whose linkrev is >= ``minlink``."""
    if not len(self):
        return

    rev, _ignored = self.getstrippoint(minlink)
    if rev == len(self):
        # Nothing to strip.
        return

    # Purge index data starting at the requested revision.
    self._indexdata[rev:] = []
    self._reflectindexupdate()
668 668
def issimplestorefile(f, kind, st):
    """Return True when path ``f`` (stat kind/st) belongs to the simple store."""
    # Only regular files can be simple-store files.
    if kind != stat.S_IFREG:
        return False

    # Revlogs belong to the normal store; transaction undo files are
    # bookkeeping, not data. Short-circuit order matches the original.
    if store.isrevlog(f, kind, st) or f.startswith('undo.'):
        return False

    # Otherwise assume it belongs to the simple store.
    return True
682 682
class simplestore(store.encodedstore):
    """Store that also reports non-revlog files under data/ as data files."""

    def datafiles(self):
        # Emit the standard (revlog) data files first.
        for x in super(simplestore, self).datafiles():
            yield x

        # Supplement with non-revlog files.
        extrafiles = self._walk('data', True, filefilter=issimplestorefile)

        for unencoded, encoded, size in extrafiles:
            try:
                unencoded = store.decodefilename(unencoded)
            except KeyError:
                # Undecodable names are reported with no unencoded form.
                unencoded = None

            yield unencoded, encoded, size
698 698
def reposetup(ui, repo):
    """Per-repository hook: route file storage through ``filestorage``."""
    if not repo.local():
        return

    if isinstance(repo, bundlerepo.bundlerepository):
        raise error.Abort(_('cannot use simple store with bundlerepo'))

    class simplestorerepo(repo.__class__):
        def file(self, f):
            return filestorage(self.svfs, f)

    repo.__class__ = simplestorerepo
711 711
def featuresetup(ui, supported):
    """Declare the simple-store requirement as supported by this repo."""
    supported.add(REQUIREMENT)
714 714
def newreporequirements(orig, repo):
    """Modifies default requirements for new repos to use the simple store."""
    requirements = orig(repo)

    # These requirements are only used to affect creation of the store
    # object. We have our own store. So we can remove them.
    # TODO do this once we feel like taking the test hit.
    #if 'fncache' in requirements:
    #    requirements.remove('fncache')
    #if 'dotencode' in requirements:
    #    requirements.remove('dotencode')

    requirements.add(REQUIREMENT)

    return requirements
730 730
def makestore(orig, requirements, path, vfstype):
    """Wrapper for store.store(): build a simplestore when required."""
    if REQUIREMENT not in requirements:
        return orig(requirements, path, vfstype)

    return simplestore(path, vfstype)
736 736
def verifierinit(orig, self, *args, **kwargs):
    """Wrapper for verify.verifier.__init__ that relaxes orphan warnings."""
    orig(self, *args, **kwargs)

    # We don't care that files in the store don't align with what is
    # advertised. So suppress these warnings.
    self.warnorphanstorefiles = False
743 743
def extsetup(ui):
    """Extension setup: install feature and wrap store/verify entry points."""
    localrepo.featuresetupfuncs.add(featuresetup)

    extensions.wrapfunction(localrepo, 'newreporequirements',
                            newreporequirements)
    extensions.wrapfunction(store, 'store', makestore)
    extensions.wrapfunction(verify.verifier, '__init__', verifierinit)
General Comments 0
You need to be logged in to leave comments. Login now