##// END OF EJS Templates
filelog: stop proxying headrevs() (API)...
Gregory Szorc -
r39821:979e9f12 default
parent child Browse files
Show More
@@ -1,278 +1,274
1 1 # filelog.py - file history class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from . import (
11 11 error,
12 12 repository,
13 13 revlog,
14 14 )
15 15 from .utils import (
16 16 interfaceutil,
17 17 )
18 18
@interfaceutil.implementer(repository.ifilestorage)
class filelog(object):
    """Storage for the revision history of a single tracked file.

    Thin wrapper that delegates to an underlying ``revlog`` instance and
    implements the ``repository.ifilestorage`` interface. The attribute
    comments below record which external consumers still rely on each
    proxied member.
    """
    def __init__(self, opener, path):
        # File revisions live in the store under ``data/<path>.i``.
        # censorable=True permits revision data to be replaced by tombstones.
        self._revlog = revlog.revlog(opener,
                                     '/'.join(('data', path + '.i')),
                                     censorable=True)
        # Full name of the user visible file, relative to the repository root.
        # Used by LFS.
        self.filename = path
        # Used by repo upgrade.
        self.index = self._revlog.index
        # Used by verify.
        self.version = self._revlog.version
        # Used by changegroup generation.
        self._generaldelta = self._revlog._generaldelta

    def __len__(self):
        return len(self._revlog)

    def __iter__(self):
        return self._revlog.__iter__()

    def revs(self, start=0, stop=None):
        return self._revlog.revs(start=start, stop=stop)

    def parents(self, node):
        return self._revlog.parents(node)

    def parentrevs(self, rev):
        return self._revlog.parentrevs(rev)

    def rev(self, node):
        return self._revlog.rev(node)

    def node(self, rev):
        return self._revlog.node(rev)

    def lookup(self, node):
        return self._revlog.lookup(node)

    def linkrev(self, rev):
        return self._revlog.linkrev(rev)

    # Used by LFS, verify.
    def flags(self, rev):
        return self._revlog.flags(rev)

    def commonancestorsheads(self, node1, node2):
        return self._revlog.commonancestorsheads(node1, node2)

    # Used by dagop.blockdescendants().
    def descendants(self, revs):
        return self._revlog.descendants(revs)

    def heads(self, start=None, stop=None):
        return self._revlog.heads(start, stop)

    # Used by hgweb, children extension.
    def children(self, node):
        return self._revlog.children(node)

    def deltaparent(self, rev):
        return self._revlog.deltaparent(rev)

    def iscensored(self, rev):
        return self._revlog.iscensored(rev)

    # Used by verify.
    def rawsize(self, rev):
        return self._revlog.rawsize(rev)

    # Might be unused.
    def checkhash(self, text, node, p1=None, p2=None, rev=None):
        return self._revlog.checkhash(text, node, p1=p1, p2=p2, rev=rev)

    def revision(self, node, _df=None, raw=False):
        return self._revlog.revision(node, _df=_df, raw=raw)

    def revdiff(self, rev1, rev2):
        return self._revlog.revdiff(rev1, rev2)

    def emitrevisiondeltas(self, requests):
        return self._revlog.emitrevisiondeltas(requests)

    def addrevision(self, revisiondata, transaction, linkrev, p1, p2,
                    node=None, flags=revlog.REVIDX_DEFAULT_FLAGS,
                    cachedelta=None):
        return self._revlog.addrevision(revisiondata, transaction, linkrev,
                                        p1, p2, node=node, flags=flags,
                                        cachedelta=cachedelta)

    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
        return self._revlog.addgroup(deltas, linkmapper, transaction,
                                     addrevisioncb=addrevisioncb)

    def getstrippoint(self, minlink):
        return self._revlog.getstrippoint(minlink)

    def strip(self, minlink, transaction):
        return self._revlog.strip(minlink, transaction)

    def censorrevision(self, tr, node, tombstone=b''):
        # NOTE(review): ``tr`` is accepted but not forwarded to the revlog —
        # confirm the underlying censorrevision() does not need it.
        return self._revlog.censorrevision(node, tombstone=tombstone)

    def files(self):
        return self._revlog.files()

    # Used by verify.
    def checksize(self):
        return self._revlog.checksize()

    def read(self, node):
        """Return file data for ``node`` with any metadata header stripped.

        Copy metadata, when present, lives in a header delimited by
        ``\\1\\n`` markers at the start of the stored revision text.
        """
        t = self.revision(node)
        if not t.startswith('\1\n'):
            return t
        s = t.index('\1\n', 2)
        return t[s + 2:]

    def add(self, text, meta, transaction, link, p1=None, p2=None):
        # Prepend a metadata header when there is metadata, or when the raw
        # text would otherwise be mistaken for starting with one.
        if meta or text.startswith('\1\n'):
            text = revlog.packmeta(meta, text)
        return self.addrevision(text, transaction, link, p1, p2)

    def renamed(self, node):
        """Return (copy source path, binary source node) or False.

        Copy metadata can only be present on revisions whose first parent
        is null, so anything else short-circuits to False.
        """
        if self.parents(node)[0] != revlog.nullid:
            return False
        t = self.revision(node)
        m = revlog.parsemeta(t)[0]
        # copy and copyrev occur in pairs. In rare cases due to bugs,
        # one can occur without the other.
        if m and "copy" in m and "copyrev" in m:
            return (m["copy"], revlog.bin(m["copyrev"]))
        return False

    def size(self, rev):
        """return the size of a given revision"""

        # for revisions with renames, we have to go the slow way
        node = self.node(rev)
        if self.renamed(node):
            return len(self.read(node))
        if self.iscensored(rev):
            return 0

        # XXX if self.read(node).startswith("\1\n"), this returns (size+4)
        return self._revlog.size(rev)

    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different than what is stored.
        """

        # An empty metadata block is prepended so the hash comparison below
        # accounts for texts that happen to start with the metadata marker.
        t = text
        if text.startswith('\1\n'):
            t = '\1\n\1\n' + text

        samehashes = not self._revlog.cmp(node, t)
        if samehashes:
            return False

        # censored files compare against the empty file
        if self.iscensored(self.rev(node)):
            return text != ''

        # renaming a file produces a different hash, even if the data
        # remains unchanged. Check if it's the case (slow):
        if self.renamed(node):
            t2 = self.read(node)
            return t2 != text

        return True

    # TODO these aren't part of the interface and aren't internal methods.
    # Callers should be fixed to not use them.

    # Used by LFS.
    @property
    def filename(self):
        return self._revlog.filename

    @filename.setter
    def filename(self, value):
        self._revlog.filename = value

    # Used by bundlefilelog, unionfilelog.
    @property
    def indexfile(self):
        return self._revlog.indexfile

    @indexfile.setter
    def indexfile(self, value):
        self._revlog.indexfile = value

    # Used by LFS, repo upgrade.
    @property
    def opener(self):
        return self._revlog.opener

    # Used by repo upgrade.
    def clone(self, tr, destrevlog, **kwargs):
        if not isinstance(destrevlog, filelog):
            raise error.ProgrammingError('expected filelog to clone()')

        return self._revlog.clone(tr, destrevlog._revlog, **kwargs)
228 224
class narrowfilelog(filelog):
    """Filelog variation to be used with narrow stores."""

    def __init__(self, opener, path, narrowmatch):
        super(narrowfilelog, self).__init__(opener, path)
        self._narrowmatch = narrowmatch

    def renamed(self, node):
        """Like filelog.renamed(), hiding renames from outside the narrowspec.

        When the rename source falls outside the narrow matcher, we may not
        have the base text of the copy, and consumers walking the ancestry or
        computing a diff could hit a missing revision. We sidestep that by
        silently dropping the rename metadata in that case.

        TODO a better approach would be to check whether the base revision is
        actually available (or to teach all consumers of rename metadata that
        it may be absent) instead of assuming it is missing.
        """
        rename = super(narrowfilelog, self).renamed(node)
        if not rename:
            return rename
        if self._narrowmatch(rename[0]):
            return rename
        return None

    def size(self, rev):
        # Our renamed() may deliberately lie, so consult the base class
        # implementation to decide how the size must be computed.
        node = self.node(rev)
        if not super(narrowfilelog, self).renamed(node):
            return super(narrowfilelog, self).size(rev)
        return len(self.read(node))

    def cmp(self, node, text):
        different = super(narrowfilelog, self).cmp(node, text)
        if not different:
            return False

        # renamed() can lie, which may yield a false positive for different
        # content. Double-check against the authoritative rename state.
        if super(narrowfilelog, self).renamed(node):
            return self.read(node) != text

        return different
@@ -1,1602 +1,1594
1 1 # repository.py - Interfaces and base classes for repositories and peers.
2 2 #
3 3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from .i18n import _
11 11 from . import (
12 12 error,
13 13 )
14 14 from .utils import (
15 15 interfaceutil,
16 16 )
17 17
18 18 # When narrowing is finalized and no longer subject to format changes,
19 19 # we should move this to just "narrow" or similar.
20 20 NARROW_REQUIREMENT = 'narrowhg-experimental'
21 21
class ipeerconnection(interfaceutil.Interface):
    """Represents a "connection" to a repository.

    This is the base interface for representing a connection to a repository.
    It holds basic properties and methods applicable to all peer types.

    This is not a complete interface definition and should not be used
    outside of this module.
    """
    # Interface methods are declared without ``self``, per the declarative
    # style used for interfaceutil.Interface subclasses in this module.
    ui = interfaceutil.Attribute("""ui.ui instance""")

    def url():
        """Returns a URL string representing this peer.

        Currently, implementations expose the raw URL used to construct the
        instance. It may contain credentials as part of the URL. The
        expectations of the value aren't well-defined and this could lead to
        data leakage.

        TODO audit/clean consumers and more clearly define the contents of this
        value.
        """

    def local():
        """Returns a local repository instance.

        If the peer represents a local repository, returns an object that
        can be used to interface with it. Otherwise returns ``None``.
        """

    def peer():
        """Returns an object conforming to this interface.

        Most implementations will ``return self``.
        """

    def canpush():
        """Returns a boolean indicating if this peer can be pushed to."""

    def close():
        """Close the connection to this peer.

        This is called when the peer will no longer be used. Resources
        associated with the peer should be cleaned up.
        """
67 67
class ipeercapabilities(interfaceutil.Interface):
    """Peer sub-interface related to capabilities."""

    def capable(name):
        """Determine support for a named capability.

        Returns ``False`` if capability not supported.

        Returns ``True`` if boolean capability is supported. Returns a string
        if capability support is non-boolean (i.e. the capability is of the
        ``name=value`` form).

        Capability strings may or may not map to wire protocol capabilities.
        """

    def requirecap(name, purpose):
        """Require a capability to be present.

        Raises a ``CapabilityError`` if the capability isn't present.
        """
87 87
class ipeercommands(interfaceutil.Interface):
    """Client-side interface for communicating over the wire protocol.

    This interface is used as a gateway to the Mercurial wire protocol.
    Methods commonly call wire protocol commands of the same name.
    """

    def branchmap():
        """Obtain heads in named branches.

        Returns a dict mapping branch name to an iterable of nodes that are
        heads on that branch.
        """

    def capabilities():
        """Obtain capabilities of the peer.

        Returns a set of string capabilities.
        """

    def clonebundles():
        """Obtains the clone bundles manifest for the repo.

        Returns the manifest as unparsed bytes.
        """

    def debugwireargs(one, two, three=None, four=None, five=None):
        """Used to facilitate debugging of arguments passed over the wire."""

    def getbundle(source, **kwargs):
        """Obtain remote repository data as a bundle.

        This command is how the bulk of repository data is transferred from
        the peer to the local repository.

        Returns a generator of bundle data.
        """

    def heads():
        """Determine all known head revisions in the peer.

        Returns an iterable of binary nodes.
        """

    def known(nodes):
        """Determine whether multiple nodes are known.

        Accepts an iterable of nodes whose presence to check for.

        Returns an iterable of booleans indicating whether the corresponding
        node at that index is known to the peer.
        """

    def listkeys(namespace):
        """Obtain all keys in a pushkey namespace.

        Returns an iterable of key names.
        """

    def lookup(key):
        """Resolve a value to a known revision.

        Returns a binary node of the resolved revision on success.
        """

    def pushkey(namespace, key, old, new):
        """Set a value using the ``pushkey`` protocol.

        Arguments correspond to the pushkey namespace and key to operate on and
        the old and new values for that key.

        Returns a string with the peer result. The value inside varies by the
        namespace.
        """

    def stream_out():
        """Obtain streaming clone data.

        Successful result should be a generator of data chunks.
        """

    def unbundle(bundle, heads, url):
        """Transfer repository data to the peer.

        This is how the bulk of data during a push is transferred.

        Returns the integer number of heads added to the peer.
        """
176 176
class ipeerlegacycommands(interfaceutil.Interface):
    """Interface for implementing support for legacy wire protocol commands.

    Wire protocol commands transition to legacy status when they are no longer
    used by modern clients. To facilitate identifying which commands are
    legacy, the interfaces are split.
    """

    def between(pairs):
        """Obtain nodes between pairs of nodes.

        ``pairs`` is an iterable of node pairs.

        Returns an iterable of iterables of nodes corresponding to each
        requested pair.
        """

    def branches(nodes):
        """Obtain ancestor changesets of specific nodes back to a branch point.

        For each requested node, the peer finds the first ancestor node that is
        a DAG root or is a merge.

        Returns an iterable of iterables with the resolved values for each node.
        """

    def changegroup(nodes, source):
        """Obtain a changegroup with data for descendants of specified nodes."""

    def changegroupsubset(bases, heads, source):
        # TODO(review): undocumented legacy command; presumably obtains a
        # changegroup limited to revisions between ``bases`` and ``heads`` —
        # confirm against the wire protocol documentation.
        pass
208 208
class ipeercommandexecutor(interfaceutil.Interface):
    """Represents a mechanism to execute remote commands.

    This is the primary interface for requesting that wire protocol commands
    be executed. Instances of this interface are active in a context manager
    and have a well-defined lifetime. When the context manager exits, all
    outstanding requests are waited on.
    """

    def callcommand(name, args):
        """Request that a named command be executed.

        Receives the command name and a dictionary of command arguments.

        Returns a ``concurrent.futures.Future`` that will resolve to the
        result of that command request. That exact value is left up to
        the implementation and possibly varies by command.

        Not all commands can coexist with other commands in an executor
        instance: it depends on the underlying wire protocol transport being
        used and the command itself.

        Implementations MAY call ``sendcommands()`` automatically if the
        requested command can not coexist with other commands in this executor.

        Implementations MAY call ``sendcommands()`` automatically when the
        future's ``result()`` is called. So, consumers using multiple
        commands with an executor MUST ensure that ``result()`` is not called
        until all command requests have been issued.
        """

    def sendcommands():
        """Trigger submission of queued command requests.

        Not all transports submit commands as soon as they are requested to
        run. When called, this method forces queued command requests to be
        issued. It will no-op if all commands have already been sent.

        When called, no more new commands may be issued with this executor.
        """

    def close():
        """Signal that this command request is finished.

        When called, no more new commands may be issued. All outstanding
        commands that have previously been issued are waited on before
        returning. This not only includes waiting for the futures to resolve,
        but also waiting for all response data to arrive. In other words,
        calling this waits for all on-wire state for issued command requests
        to finish.

        When used as a context manager, this method is called when exiting the
        context manager.

        This method may call ``sendcommands()`` if there are buffered commands.
        """
265 265
class ipeerrequests(interfaceutil.Interface):
    """Interface for executing commands on a peer."""

    def commandexecutor():
        """A context manager that resolves to an ipeercommandexecutor.

        The object this resolves to can be used to issue command requests
        to the peer.

        Callers should call its ``callcommand`` method to issue command
        requests.

        A new executor should be obtained for each distinct set of commands
        (possibly just a single command) that the consumer wants to execute
        as part of a single operation or round trip. This is because some
        peers are half-duplex and/or don't support persistent connections.
        e.g. in the case of HTTP peers, commands sent to an executor represent
        a single HTTP request. While some peers may support multiple command
        sends over the wire per executor, consumers need to code to the least
        capable peer. So it should be assumed that command executors buffer
        called commands until they are told to send them and that each
        command executor could result in a new connection or wire-level request
        being issued.
        """
290 290
class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
    """Unified interface for peer repositories.

    Combines the connection, capabilities, and command request interfaces.
    All peer instances must conform to this interface.
    """
296 296
@interfaceutil.implementer(ipeerbase)
class peer(object):
    """Base class for peer repositories."""

    def capable(self, name):
        """Report support for capability ``name``.

        Returns True for a plain boolean capability, the string value for a
        ``name=value`` style capability, and False when unsupported.
        """
        caps = self.capabilities()
        if name in caps:
            return True

        prefix = '%s=' % name
        match = next((c for c in caps if c.startswith(prefix)), None)
        if match is not None:
            return match[len(prefix):]

        return False

    def requirecap(self, name, purpose):
        """Raise ``error.CapabilityError`` unless ``name`` is supported."""
        if not self.capable(name):
            raise error.CapabilityError(
                _('cannot %s; remote repository does not support the %r '
                  'capability') % (purpose, name))
320 320
class irevisiondelta(interfaceutil.Interface):
    """Represents a delta between one revision and another.

    Instances convey enough information to allow a revision to be exchanged
    with another repository.

    Instances represent the fulltext revision data or a delta against
    another revision. Therefore the ``revision`` and ``delta`` attributes
    are mutually exclusive.

    Typically used for changegroup generation.
    """

    # Objects of this kind are produced by
    # ``ifiledata.emitrevisiondeltas()`` from ``irevisiondeltarequest``s.
    node = interfaceutil.Attribute(
        """20 byte node of this revision.""")

    p1node = interfaceutil.Attribute(
        """20 byte node of 1st parent of this revision.""")

    p2node = interfaceutil.Attribute(
        """20 byte node of 2nd parent of this revision.""")

    linknode = interfaceutil.Attribute(
        """20 byte node of the changelog revision this node is linked to.""")

    flags = interfaceutil.Attribute(
        """2 bytes of integer flags that apply to this revision.""")

    basenode = interfaceutil.Attribute(
        """20 byte node of the revision this data is a delta against.

        ``nullid`` indicates that the revision is a full revision and not
        a delta.
        """)

    baserevisionsize = interfaceutil.Attribute(
        """Size of base revision this delta is against.

        May be ``None`` if ``basenode`` is ``nullid``.
        """)

    revision = interfaceutil.Attribute(
        """Raw fulltext of revision data for this node.""")

    delta = interfaceutil.Attribute(
        """Delta between ``basenode`` and ``node``.

        Stored in the bdiff delta format.
        """)
370 370
class irevisiondeltarequest(interfaceutil.Interface):
    """Represents a request to generate an ``irevisiondelta``."""

    # Consumed by ``ifiledata.emitrevisiondeltas()``.
    node = interfaceutil.Attribute(
        """20 byte node of revision being requested.""")

    p1node = interfaceutil.Attribute(
        """20 byte node of 1st parent of revision.""")

    p2node = interfaceutil.Attribute(
        """20 byte node of 2nd parent of revision.""")

    linknode = interfaceutil.Attribute(
        """20 byte node to store in ``linknode`` attribute.""")

    basenode = interfaceutil.Attribute(
        """Base revision that delta should be generated against.

        If ``nullid``, the derived ``irevisiondelta`` should have its
        ``revision`` field populated and no delta should be generated.

        If ``None``, the delta may be generated against any revision that
        is an ancestor of this revision. Or a full revision may be used.

        If any other value, the delta should be produced against that
        revision.
        """)

    ellipsis = interfaceutil.Attribute(
        """Boolean on whether the ellipsis flag should be set.""")
401 401
class ifilerevisionssequence(interfaceutil.Interface):
    """Contains index data for all revisions of a file.

    Types implementing this behave like lists of tuples. The index
    in the list corresponds to the revision number. The values contain
    index metadata.

    The *null* revision (revision number -1) is always the last item
    in the index.
    """

    def __len__():
        """The total number of revisions."""

    def __getitem__(rev):
        """Returns the object having a specific revision number.

        Returns an 8-tuple with the following fields:

        offset+flags
           Contains the offset and flags for the revision. 64-bit unsigned
           integer where first 6 bytes are the offset and the next 2 bytes
           are flags. The offset can be 0 if it is not used by the store.
        compressed size
           Size of the revision data in the store. It can be 0 if it isn't
           needed by the store.
        uncompressed size
           Fulltext size. It can be 0 if it isn't needed by the store.
        base revision
           Revision number of revision the delta for storage is encoded
           against. -1 indicates not encoded against a base revision.
        link revision
           Revision number of changelog revision this entry is related to.
        p1 revision
           Revision number of 1st parent. -1 if no 1st parent.
        p2 revision
           Revision number of 2nd parent. -1 if no 2nd parent.
        node
           Binary node value for this revision number.

        Negative values should index off the end of the sequence. ``-1``
        should return the null revision. ``-2`` should return the most
        recent revision.
        """

    def __contains__(rev):
        """Whether a revision number exists."""

    # NOTE(review): unlike the other declarations on this interface,
    # ``insert`` is declared with an explicit ``self`` — confirm whether
    # that is intentional or should be dropped for consistency.
    def insert(self, i, entry):
        """Add an item to the index at specific revision."""
452 452
class ifileindex(interfaceutil.Interface):
    """Storage interface for index data of a single file.

    File storage data is divided into index metadata and data storage.
    This interface defines the index portion of the interface.

    The index logically consists of:

    * A mapping between revision numbers and nodes.
    * DAG data (storing and querying the relationship between nodes).
    * Metadata to facilitate storage.
    """
    index = interfaceutil.Attribute(
        """An ``ifilerevisionssequence`` instance.""")

    def __len__():
        """Obtain the number of revisions stored for this file."""

    def __iter__():
        """Iterate over revision numbers for this file."""

    def revs(start=0, stop=None):
        """Iterate over revision numbers for this file, with control."""

    def parents(node):
        """Returns a 2-tuple of parent nodes for a revision.

        Values will be ``nullid`` if the parent is empty.
        """

    def parentrevs(rev):
        """Like parents() but operates on revision numbers."""

    def rev(node):
        """Obtain the revision number given a node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def node(rev):
        """Obtain the node value given a revision number.

        Raises ``IndexError`` if the node is not known.
        """

    def lookup(node):
        """Attempt to resolve a value to a node.

        Value can be a binary node, hex node, revision number, or a string
        that can be converted to an integer.

        Raises ``error.LookupError`` if a node could not be resolved.
        """

    def linkrev(rev):
        """Obtain the changeset revision number a revision is linked to."""

    def flags(rev):
        """Obtain flags used to affect storage of a revision."""

    def iscensored(rev):
        """Return whether a revision's content has been censored."""

    def commonancestorsheads(node1, node2):
        """Obtain an iterable of nodes containing heads of common ancestors.

        See ``ancestor.commonancestorsheads()``.
        """

    def descendants(revs):
        """Obtain descendant revision numbers for a set of revision numbers.

        If ``nullrev`` is in the set, this is equivalent to ``revs()``.
        """

    def heads(start=None, stop=None):
        """Obtain a list of nodes that are DAG heads, with control.

        The set of revisions examined can be limited by specifying
        ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
        iterable of nodes. DAG traversal starts at earlier revision
        ``start`` and iterates forward until any node in ``stop`` is
        encountered.
        """

    def children(node):
        """Obtain nodes that are children of a node.

        Returns a list of nodes.
        """

    def deltaparent(rev):
        """Return the revision that is a suitable parent to delta against."""
554 546
class ifiledata(interfaceutil.Interface):
    """Storage interface for data storage of a specific file.

    This complements ``ifileindex`` and provides an interface for accessing
    data for a tracked file.
    """
    def rawsize(rev):
        """The size of the fulltext data for a revision as stored."""

    def size(rev):
        """Obtain the fulltext size of file data.

        Any metadata is excluded from size measurements. Use ``rawsize()`` if
        metadata size is important.
        """

    def checkhash(fulltext, node, p1=None, p2=None, rev=None):
        """Validate the stored hash of a given fulltext and node.

        Raises ``error.StorageError`` if hash validation fails.
        """

    def revision(node, raw=False):
        """Obtain fulltext data for a node.

        By default, any storage transformations are applied before the data
        is returned. If ``raw`` is True, non-raw storage transformations
        are not applied.

        The fulltext data may contain a header containing metadata. Most
        consumers should use ``read()`` to obtain the actual file data.
        """

    def read(node):
        """Resolve file fulltext data.

        This is similar to ``revision()`` except any metadata in the data
        headers is stripped.
        """

    def renamed(node):
        """Obtain copy metadata for a node.

        Returns ``False`` if no copy metadata is stored or a 2-tuple of
        (path, node) from which this revision was copied.
        """

    def cmp(node, fulltext):
        """Compare fulltext to another revision.

        Returns True if the fulltext is different from what is stored.

        This takes copy metadata into account.

        TODO better document the copy metadata and censoring logic.
        """

    def revdiff(rev1, rev2):
        """Obtain a delta between two revision numbers.

        Operates on raw data in the store (``revision(node, raw=True)``).

        The returned data is the result of ``bdiff.bdiff`` on the raw
        revision data.
        """

    def emitrevisiondeltas(requests):
        """Produce ``irevisiondelta`` from ``irevisiondeltarequest``s.

        Given an iterable of objects conforming to the ``irevisiondeltarequest``
        interface, emits objects conforming to the ``irevisiondelta``
        interface.

        This method is a generator.

        ``irevisiondelta`` should be emitted in the same order of
        ``irevisiondeltarequest`` that was passed in.

        The emitted objects MUST conform to the results of
        ``irevisiondeltarequest``. Namely, they must respect any requests
        for building a delta from a specific ``basenode`` if defined.

        When sending deltas, implementations must take into account whether
        the client has the base delta before encoding a delta against that
        revision. A revision encountered previously in ``requests`` is
        always a suitable base revision. An example of a bad delta is a delta
        against a non-ancestor revision. Another example of a bad delta is a
        delta against a censored revision.
        """
644 636
class ifilemutation(interfaceutil.Interface):
    """Storage interface for mutation events of a tracked file."""

    def add(filedata, meta, transaction, linkrev, p1, p2):
        """Add a new revision to the store.

        Takes file data, dictionary of metadata, a transaction, linkrev,
        and parent nodes.

        Returns the node that was added.

        May no-op if a revision matching the supplied data is already stored.
        """

    def addrevision(revisiondata, transaction, linkrev, p1, p2, node=None,
                    flags=0, cachedelta=None):
        """Add a new revision to the store.

        This is similar to ``add()`` except it operates at a lower level.

        The data passed in already contains a metadata header, if any.

        ``node`` and ``flags`` can be used to define the expected node and
        the flags to use with storage.

        ``add()`` is usually called when adding files from e.g. the working
        directory. ``addrevision()`` is often called by ``add()`` and for
        scenarios where revision data has already been computed, such as when
        applying raw data from a peer repo.
        """

    def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
        """Process a series of deltas for storage.

        ``deltas`` is an iterable of 7-tuples of
        (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
        to add.

        The ``delta`` field contains ``mpatch`` data to apply to a base
        revision, identified by ``deltabase``. The base node can be
        ``nullid``, in which case the header from the delta can be ignored
        and the delta used as the fulltext.

        ``addrevisioncb`` should be called for each node as it is committed.

        Returns a list of nodes that were processed. A node will be in the list
        even if it existed in the store previously.
        """

    def censorrevision(tr, node, tombstone=b''):
        """Remove the content of a single revision.

        The specified ``node`` will have its content purged from storage.
        Future attempts to access the revision data for this node will
        result in failure.

        A ``tombstone`` message can optionally be stored. This message may be
        displayed to users when they attempt to access the missing revision
        data.

        Storage backends may have stored deltas against the previous content
        in this revision. As part of censoring a revision, these storage
        backends are expected to rewrite any internally stored deltas such
        that they no longer reference the deleted content.
        """

    def getstrippoint(minlink):
        """Find the minimum revision that must be stripped to strip a linkrev.

        Returns a 2-tuple containing the minimum revision number and a set
        of all revision numbers that would be broken by this strip.

        TODO this is highly revlog centric and should be abstracted into
        a higher-level deletion API. ``repair.strip()`` relies on this.
        """

    def strip(minlink, transaction):
        """Remove storage of items starting at a linkrev.

        This uses ``getstrippoint()`` to determine the first node to remove.
        Then it effectively truncates storage for all revisions after that.

        TODO this is highly revlog centric and should be abstracted into a
        higher-level deletion API.
        """
730 722
class ifilestorage(ifileindex, ifiledata, ifilemutation):
    """Complete storage interface for a single tracked file.

    Composes the index, data access, and mutation sub-interfaces.
    """

    version = interfaceutil.Attribute(
        """Version number of storage.

        TODO this feels revlog centric and could likely be removed.
        """)

    _generaldelta = interfaceutil.Attribute(
        """Whether deltas can be against any parent revision.

        TODO this is used by changegroup code and it could probably be
        folded into another API.
        """)

    def files():
        """Obtain paths that are backing storage for this file.

        TODO this is used heavily by verify code and there should probably
        be a better API for that.
        """

    def checksize():
        """Obtain the expected sizes of backing files.

        TODO this is used by verify and it should not be part of the interface.
        """
759 751
class idirs(interfaceutil.Interface):
    """Interface representing a collection of directories from paths.

    This interface is essentially a derived data structure representing
    directories from a collection of paths. A directory remains in the
    collection as long as at least one added path is inside it (see
    ``delpath``).
    """

    def addpath(path):
        """Add a path to the collection.

        All directories in the path will be added to the collection.
        """

    def delpath(path):
        """Remove a path from the collection.

        If the removal was the last path in a particular directory, the
        directory is removed from the collection.
        """

    def __iter__():
        """Iterate over the directories in this collection of paths."""

    def __contains__(path):
        """Whether a specific directory is in this collection."""
785 777
class imanifestdict(interfaceutil.Interface):
    """Interface representing a manifest data structure.

    A manifest is effectively a dict mapping paths to entries. Each entry
    consists of a binary node and extra flags affecting that entry.
    """

    def __getitem__(path):
        """Returns the binary node value for a path in the manifest.

        Raises ``KeyError`` if the path does not exist in the manifest.

        Equivalent to ``self.find(path)[0]``.
        """

    def find(path):
        """Returns the entry for a path in the manifest.

        Returns a 2-tuple of (node, flags).

        Raises ``KeyError`` if the path does not exist in the manifest.
        """

    def __len__():
        """Return the number of entries in the manifest."""

    def __nonzero__():
        """Returns True if the manifest has entries, False otherwise."""

    __bool__ = __nonzero__

    def __setitem__(path, node):
        """Define the node value for a path in the manifest.

        If the path is already in the manifest, its flags will be copied to
        the new entry.
        """

    def __contains__(path):
        """Whether a path exists in the manifest."""

    def __delitem__(path):
        """Remove a path from the manifest.

        Raises ``KeyError`` if the path is not in the manifest.
        """

    def __iter__():
        """Iterate over paths in the manifest."""

    def iterkeys():
        """Iterate over paths in the manifest."""

    def keys():
        """Obtain a list of paths in the manifest."""

    def filesnotin(other, match=None):
        """Obtain the set of paths in this manifest but not in another.

        ``match`` is an optional matcher function to be applied to both
        manifests.

        Returns a set of paths.
        """

    def dirs():
        """Returns an object implementing the ``idirs`` interface."""

    def hasdir(dir):
        """Returns a bool indicating if a directory is in this manifest."""

    def matches(match):
        """Generate a new manifest filtered through a matcher.

        Returns an object conforming to the ``imanifestdict`` interface.
        """

    def walk(match):
        """Generator of paths in manifest satisfying a matcher.

        This is equivalent to ``self.matches(match).iterkeys()`` except a new
        manifest object is not created.

        If the matcher has explicit files listed and they don't exist in
        the manifest, ``match.bad()`` is called for each missing file.
        """

    def diff(other, match=None, clean=False):
        """Find differences between this manifest and another.

        This manifest is compared to ``other``.

        If ``match`` is provided, the two manifests are filtered against this
        matcher and only entries satisfying the matcher are compared.

        If ``clean`` is True, unchanged files are included in the returned
        object.

        Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
        the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
        represents the node and flags for this manifest and ``(node2, flag2)``
        are the same for the other manifest.
        """

    def setflag(path, flag):
        """Set the flag value for a given path.

        Raises ``KeyError`` if the path is not already in the manifest.
        """

    def get(path, default=None):
        """Obtain the node value for a path or a default value if missing."""

    def flags(path, default=''):
        """Return the flags value for a path or a default value if missing."""

    def copy():
        """Return a copy of this manifest."""

    def items():
        """Returns an iterable of (path, node) for items in this manifest."""

    def iteritems():
        """Identical to items()."""

    def iterentries():
        """Returns an iterable of (path, node, flags) for this manifest.

        Similar to ``iteritems()`` except items are a 3-tuple and include
        flags.
        """

    def text():
        """Obtain the raw data representation for this manifest.

        Result is used to create a manifest revision.
        """

    def fastdelta(base, changes):
        """Obtain a delta between this manifest and another given changes.

        ``base`` is the raw data representation for another manifest.

        ``changes`` is an iterable of ``(path, to_delete)``.

        Returns a 2-tuple containing ``bytearray(self.text())`` and the
        delta between ``base`` and this manifest.
        """
934 926
class imanifestrevisionbase(interfaceutil.Interface):
    """Base interface representing a single revision of a manifest.

    Should not be used as a primary interface: should always be inherited
    as part of a larger interface (see ``imanifestrevisionstored`` and
    ``imanifestrevisionwritable``).
    """

    def new():
        """Obtain a new manifest instance.

        Returns an object conforming to the ``imanifestrevisionwritable``
        interface. The instance will be associated with the same
        ``imanifestlog`` collection as this instance.
        """

    def copy():
        """Obtain a copy of this manifest instance.

        Returns an object conforming to the ``imanifestrevisionwritable``
        interface. The instance will be associated with the same
        ``imanifestlog`` collection as this instance.
        """

    def read():
        """Obtain the parsed manifest data structure.

        The returned object conforms to the ``imanifestdict`` interface.
        """
963 955
class imanifestrevisionstored(imanifestrevisionbase):
    """Interface representing a manifest revision committed to storage."""

    def node():
        """The binary node for this manifest."""

    parents = interfaceutil.Attribute(
        """List of binary nodes that are parents for this manifest revision."""
    )

    def readdelta(shallow=False):
        """Obtain the manifest data structure representing changes from parent.

        This manifest is compared to its 1st parent. A new manifest representing
        those differences is constructed.

        The returned object conforms to the ``imanifestdict`` interface.
        """

    def readfast(shallow=False):
        """Calls either ``read()`` or ``readdelta()``.

        The faster of the two options is called.
        """

    def find(key):
        """Calls ``self.read().find(key)``.

        Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
        """
994 986
class imanifestrevisionwritable(imanifestrevisionbase):
    """Interface representing a manifest revision that can be committed."""

    def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
        """Add this revision to storage.

        Takes a transaction object, the changeset revision number it will
        be associated with, its parent nodes, and lists of added and
        removed paths.

        If ``match`` is provided, storage can choose not to inspect or write
        out items that do not match. Storage is still required to be able to
        provide the full manifest in the future for any directories written
        (these manifests should not be "narrowed on disk").

        Returns the binary node of the created revision.
        """
1012 1004
class imanifeststorage(interfaceutil.Interface):
    """Storage interface for manifest data."""

    tree = interfaceutil.Attribute(
        """The path to the directory this manifest tracks.

        The empty bytestring represents the root manifest.
        """)

    index = interfaceutil.Attribute(
        """An ``ifilerevisionssequence`` instance.""")

    indexfile = interfaceutil.Attribute(
        """Path of revlog index file.

        TODO this is revlog specific and should not be exposed.
        """)

    opener = interfaceutil.Attribute(
        """VFS opener to use to access underlying files used for storage.

        TODO this is revlog specific and should not be exposed.
        """)

    version = interfaceutil.Attribute(
        """Revlog version number.

        TODO this is revlog specific and should not be exposed.
        """)

    _generaldelta = interfaceutil.Attribute(
        """Whether generaldelta storage is being used.

        TODO this is revlog specific and should not be exposed.
        """)

    fulltextcache = interfaceutil.Attribute(
        """Dict with cache of fulltexts.

        TODO this doesn't feel appropriate for the storage interface.
        """)

    def __len__():
        """Obtain the number of revisions stored for this manifest."""

    def __iter__():
        """Iterate over revision numbers for this manifest."""

    def rev(node):
        """Obtain the revision number given a binary node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def node(rev):
        """Obtain the node value given a revision number.

        Raises ``error.LookupError`` if the revision is not known.
        """

    def lookup(value):
        """Attempt to resolve a value to a node.

        Value can be a binary node, hex node, revision number, or a bytes
        that can be converted to an integer.

        Raises ``error.LookupError`` if a node could not be resolved.

        TODO this is only used by debug* commands and can probably be deleted
        easily.
        """

    def parents(node):
        """Returns a 2-tuple of parent nodes for a node.

        Values will be ``nullid`` if the parent is empty.
        """

    def parentrevs(rev):
        """Like parents() but operates on revision numbers."""

    def linkrev(rev):
        """Obtain the changeset revision number a revision is linked to."""

    def revision(node, _df=None, raw=False):
        """Obtain fulltext data for a node."""

    def revdiff(rev1, rev2):
        """Obtain a delta between two revision numbers.

        The returned data is the result of ``bdiff.bdiff()`` on the raw
        revision data.
        """

    def cmp(node, fulltext):
        """Compare fulltext to another revision.

        Returns True if the fulltext is different from what is stored.
        """

    def emitrevisiondeltas(requests):
        """Produce ``irevisiondelta`` from ``irevisiondeltarequest``s.

        See the documentation for ``ifiledata`` for more.
        """

    def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
        """Process a series of deltas for storage.

        See the documentation in ``ifilemutation`` for more.
        """

    def getstrippoint(minlink):
        """Find minimum revision that must be stripped to strip a linkrev.

        See the documentation in ``ifilemutation`` for more.
        """

    def strip(minlink, transaction):
        """Remove storage of items starting at a linkrev.

        See the documentation in ``ifilemutation`` for more.
        """

    def checksize():
        """Obtain the expected sizes of backing files.

        TODO this is used by verify and it should not be part of the interface.
        """

    def files():
        """Obtain paths that are backing storage for this manifest.

        TODO this is used by verify and there should probably be a better API
        for this functionality.
        """

    def deltaparent(rev):
        """Obtain the revision that a revision is delta'd against.

        TODO delta encoding is an implementation detail of storage and should
        not be exposed to the storage interface.
        """

    def clone(tr, dest, **kwargs):
        """Clone this instance to another."""

    def clearcaches(clear_persisted_data=False):
        """Clear any caches associated with this instance."""

    def dirlog(d):
        """Obtain a manifest storage instance for a tree."""

    def add(m, transaction, link, p1, p2, added, removed, readtree=None,
            match=None):
        """Add a revision to storage.

        ``m`` is an object conforming to ``imanifestdict``.

        ``link`` is the linkrev revision number.

        ``p1`` and ``p2`` are the parent revision numbers.

        ``added`` and ``removed`` are iterables of added and removed paths,
        respectively.

        ``readtree`` is a function that can be used to read the child tree(s)
        when recursively writing the full tree structure when using
        treemanifests.

        ``match`` is a matcher that can be used to hint to storage that not all
        paths must be inspected; this is an optimization and can be safely
        ignored. Note that the storage must still be able to reproduce a full
        manifest including files that did not match.
        """
1188 1180
class imanifestlog(interfaceutil.Interface):
    """Interface representing a collection of manifest snapshots.

    Represents the root manifest in a repository.

    Also serves as a means to access nested tree manifests and to cache
    tree manifests.
    """

    def __getitem__(node):
        """Obtain a manifest instance for a given binary node.

        Equivalent to calling ``self.get('', node)``.

        The returned object conforms to the ``imanifestrevisionstored``
        interface.
        """

    def get(tree, node, verify=True):
        """Retrieve the manifest instance for a given directory and binary node.

        ``node`` always refers to the node of the root manifest (which will be
        the only manifest if flat manifests are being used).

        If ``tree`` is the empty string, the root manifest is returned.
        Otherwise the manifest for the specified directory will be returned
        (requires tree manifests).

        If ``verify`` is True, ``LookupError`` is raised if the node is not
        known.

        The returned object conforms to the ``imanifestrevisionstored``
        interface.
        """

    def getstorage(tree):
        """Retrieve an interface to storage for a particular tree.

        If ``tree`` is the empty bytestring, storage for the root manifest will
        be returned. Otherwise storage for a tree manifest is returned.

        NOTE(review): presumably the returned object conforms to
        ``imanifeststorage``; not formalized yet — see TODO below.

        TODO formalize interface for returned object.
        """

    def clearcaches():
        """Clear caches associated with this collection."""

    def rev(node):
        """Obtain the revision number for a binary node.

        Raises ``error.LookupError`` if the node is not known.
        """
1241 1233
class ilocalrepositoryfilestorage(interfaceutil.Interface):
    """Local repository sub-interface providing access to tracked file storage.

    This interface defines how a repository accesses storage for a single
    tracked file path.
    """

    def file(f):
        """Obtain a filelog for a tracked path.

        The returned type conforms to the ``ifilestorage`` interface.
        """
1254 1246
class ilocalrepositorymain(interfaceutil.Interface):
    """Main interface for local repositories.

    This currently captures the reality of things - not how things should be.
    """

    supportedformats = interfaceutil.Attribute(
        """Set of requirements that apply to stream clone.

        This is actually a class attribute and is shared among all instances.
        """)

    supported = interfaceutil.Attribute(
        """Set of requirements that this repo is capable of opening.""")

    requirements = interfaceutil.Attribute(
        """Set of requirements this repo uses.""")

    filtername = interfaceutil.Attribute(
        """Name of the repoview that is active on this repo.""")

    wvfs = interfaceutil.Attribute(
        """VFS used to access the working directory.""")

    vfs = interfaceutil.Attribute(
        """VFS rooted at the .hg directory.

        Used to access repository data not in the store.
        """)

    svfs = interfaceutil.Attribute(
        """VFS rooted at the store.

        Used to access repository data in the store. Typically .hg/store.
        But can point elsewhere if the store is shared.
        """)

    root = interfaceutil.Attribute(
        """Path to the root of the working directory.""")

    path = interfaceutil.Attribute(
        """Path to the .hg directory.""")

    origroot = interfaceutil.Attribute(
        """The filesystem path that was used to construct the repo.""")

    auditor = interfaceutil.Attribute(
        """A pathauditor for the working directory.

        This checks if a path refers to a nested repository.

        Operates on the filesystem.
        """)

    nofsauditor = interfaceutil.Attribute(
        """A pathauditor for the working directory.

        This is like ``auditor`` except it doesn't do filesystem checks.
        """)

    baseui = interfaceutil.Attribute(
        """Original ui instance passed into constructor.""")

    ui = interfaceutil.Attribute(
        """Main ui instance for this instance.""")

    sharedpath = interfaceutil.Attribute(
        """Path to the .hg directory of the repo this repo was shared from.""")

    store = interfaceutil.Attribute(
        """A store instance.""")

    spath = interfaceutil.Attribute(
        """Path to the store.""")

    sjoin = interfaceutil.Attribute(
        """Alias to self.store.join.""")

    cachevfs = interfaceutil.Attribute(
        """A VFS used to access the cache directory.

        Typically .hg/cache.
        """)

    filteredrevcache = interfaceutil.Attribute(
        """Holds sets of revisions to be filtered.""")

    names = interfaceutil.Attribute(
        """A ``namespaces`` instance.""")

    def close():
        """Close the handle on this repository."""

    def peer():
        """Obtain an object conforming to the ``peer`` interface."""

    def unfiltered():
        """Obtain an unfiltered/raw view of this repo."""

    def filtered(name, visibilityexceptions=None):
        """Obtain a named view of this repository."""

    obsstore = interfaceutil.Attribute(
        """A store of obsolescence data.""")

    changelog = interfaceutil.Attribute(
        """A handle on the changelog revlog.""")

    manifestlog = interfaceutil.Attribute(
        """An instance conforming to the ``imanifestlog`` interface.

        Provides access to manifests for the repository.
        """)

    dirstate = interfaceutil.Attribute(
        """Working directory state.""")

    narrowpats = interfaceutil.Attribute(
        """Matcher patterns for this repository's narrowspec.""")

    def narrowmatch():
        """Obtain a matcher for the narrowspec."""

    def setnarrowpats(newincludes, newexcludes):
        """Define the narrowspec for this repository."""

    def __getitem__(changeid):
        """Try to resolve a changectx."""

    def __contains__(changeid):
        """Whether a changeset exists."""

    def __nonzero__():
        """Always returns True."""
        return True

    __bool__ = __nonzero__

    def __len__():
        """Returns the number of changesets in the repo."""

    def __iter__():
        """Iterate over revisions in the changelog."""

    def revs(expr, *args):
        """Evaluate a revset.

        Emits revisions.
        """

    def set(expr, *args):
        """Evaluate a revset.

        Emits changectx instances.
        """

    def anyrevs(specs, user=False, localalias=None):
        """Find revisions matching one of the given revsets."""

    def url():
        """Returns a string representing the location of this repo."""

    def hook(name, throw=False, **args):
        """Call a hook."""

    def tags():
        """Return a mapping of tag to node."""

    def tagtype(tagname):
        """Return the type of a given tag."""

    def tagslist():
        """Return a list of tags ordered by revision."""

    def nodetags(node):
        """Return the tags associated with a node."""

    def nodebookmarks(node):
        """Return the list of bookmarks pointing to the specified node."""

    def branchmap():
        """Return a mapping of branch to heads in that branch."""

    def revbranchcache():
        """Obtain a revision-branch cache.

        TODO(review): undocumented in the original declaration; confirm
        return type and semantics against the implementation.
        """
        pass

    def branchtip(branchtip, ignoremissing=False):
        """Return the tip node for a given branch."""

    def lookup(key):
        """Resolve the node for a revision."""

    def lookupbranch(key):
        """Look up the branch name of the given revision or branch name."""

    def known(nodes):
        """Determine whether a series of nodes is known.

        Returns a list of bools.
        """

    def local():
        """Whether the repository is local."""
        return True

    def publishing():
        """Whether the repository is a publishing repository."""

    def cancopy():
        """Whether this repository can be copied.

        TODO(review): semantics inferred from the name only; confirm.
        """
        pass

    def shared():
        """The type of shared repository or None."""

    def wjoin(f, *insidef):
        """Calls self.vfs.reljoin(self.root, f, *insidef)"""

    def setparents(p1, p2):
        """Set the parent nodes of the working directory."""

    def filectx(path, changeid=None, fileid=None):
        """Obtain a filectx for the given file revision."""

    def getcwd():
        """Obtain the current working directory from the dirstate."""

    def pathto(f, cwd=None):
        """Obtain the relative path to a file."""

    def adddatafilter(name, fltr):
        """Register a named data filter.

        TODO(review): undocumented in the original declaration; presumably
        related to the filtering performed by ``wread``/``wwrite`` — confirm.
        """
        pass

    def wread(filename):
        """Read a file from wvfs, using data filters."""

    def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
        """Write data to a file in the wvfs, using data filters."""

    def wwritedata(filename, data):
        """Resolve data for writing to the wvfs, using data filters."""

    def currenttransaction():
        """Obtain the current transaction instance or None."""

    def transaction(desc, report=None):
        """Open a new transaction to write to the repository."""

    def undofiles():
        """Returns a list of (vfs, path) for files to undo transactions."""

    def recover():
        """Roll back an interrupted transaction."""

    def rollback(dryrun=False, force=False):
        """Undo the last transaction.

        DANGEROUS.
        """

    def updatecaches(tr=None, full=False):
        """Warm repo caches."""

    def invalidatecaches():
        """Invalidate cached data due to the repository mutating."""

    def invalidatevolatilesets():
        """Invalidate volatile revision sets.

        TODO(review): undocumented in the original declaration; confirm.
        """
        pass

    def invalidatedirstate():
        """Invalidate the dirstate."""

    def invalidate(clearfilecache=False):
        """Invalidate cached repository state.

        TODO(review): undocumented; relationship to ``invalidatecaches()``
        and the meaning of ``clearfilecache`` need confirming.
        """
        pass

    def invalidateall():
        """Invalidate everything that can be invalidated.

        TODO(review): undocumented in the original declaration; confirm.
        """
        pass

    def lock(wait=True):
        """Lock the repository store and return a lock instance."""

    def wlock(wait=True):
        """Lock the non-store parts of the repository."""

    def currentwlock():
        """Return the wlock if it's held or None."""

    def checkcommitpatterns(wctx, vdirs, match, status, fail):
        """Validate files selected for commit.

        TODO(review): undocumented in the original declaration; confirm
        semantics of ``vdirs`` and the ``fail`` callback.
        """
        pass

    def commit(text='', user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to the repository."""

    def commitctx(ctx, error=False):
        """Commit a commitctx instance to the repository."""

    def destroying():
        """Inform the repository that nodes are about to be destroyed."""

    def destroyed():
        """Inform the repository that nodes have been destroyed."""

    def status(node1='.', node2=None, match=None, ignored=False,
               clean=False, unknown=False, listsubrepos=False):
        """Convenience method to call repo[x].status()."""

    def addpostdsstatus(ps):
        """Register a post-dirstate-status callback.

        TODO(review): undocumented; presumably ``ps`` is invoked by
        ``postdsstatus()`` — confirm.
        """
        pass

    def postdsstatus():
        """Run post-dirstate-status hooks.

        TODO(review): undocumented; see ``addpostdsstatus()``.
        """
        pass

    def clearpostdsstatus():
        """Clear registered post-dirstate-status callbacks.

        TODO(review): undocumented; see ``addpostdsstatus()``.
        """
        pass

    def heads(start=None):
        """Obtain list of nodes that are DAG heads."""

    def branchheads(branch=None, start=None, closed=False):
        """Obtain heads of a branch.

        TODO(review): undocumented in the original declaration; confirm
        semantics of ``start`` and ``closed``.
        """
        pass

    def branches(nodes):
        """TODO(review): undocumented in the original declaration."""
        pass

    def between(pairs):
        """TODO(review): undocumented in the original declaration."""
        pass

    def checkpush(pushop):
        """Validate a push operation.

        TODO(review): undocumented in the original declaration; confirm.
        """
        pass

    prepushoutgoinghooks = interfaceutil.Attribute(
        """util.hooks instance.""")

    def pushkey(namespace, key, old, new):
        """Update a pushkey namespace entry.

        TODO(review): undocumented in the original declaration; confirm.
        """
        pass

    def listkeys(namespace):
        """List keys in a pushkey namespace.

        TODO(review): undocumented in the original declaration; confirm.
        """
        pass

    def debugwireargs(one, two, three=None, four=None, five=None):
        """TODO(review): undocumented; judging by the name this backs a
        debug wire-protocol command — confirm."""
        pass

    def savecommitmessage(text):
        """Persist ``text`` for later retrieval.

        TODO(review): undocumented; presumably saves a commit message for
        recovery after an aborted commit — confirm.
        """
        pass
1599 1591
class completelocalrepository(ilocalrepositorymain,
                              ilocalrepositoryfilestorage):
    """Complete interface for a local repository.

    Combines the main repository interface with tracked file storage access.
    """
@@ -1,984 +1,977
1 1 # storage.py - Testing of storage primitives.
2 2 #
3 3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import unittest
11 11
12 12 from ..node import (
13 13 hex,
14 14 nullid,
15 15 nullrev,
16 16 )
17 17 from .. import (
18 18 error,
19 19 mdiff,
20 20 revlog,
21 21 )
22 22
23 23 class basetestcase(unittest.TestCase):
24 24 if not getattr(unittest.TestCase, r'assertRaisesRegex', False):
25 25 assertRaisesRegex = (# camelcase-required
26 26 unittest.TestCase.assertRaisesRegexp)
27 27
28 28 class revisiondeltarequest(object):
29 29 def __init__(self, node, p1, p2, linknode, basenode, ellipsis):
30 30 self.node = node
31 31 self.p1node = p1
32 32 self.p2node = p2
33 33 self.linknode = linknode
34 34 self.basenode = basenode
35 35 self.ellipsis = ellipsis
36 36
37 37 class ifileindextests(basetestcase):
38 38 """Generic tests for the ifileindex interface.
39 39
40 40 All file storage backends for index data should conform to the tests in this
41 41 class.
42 42
43 43 Use ``makeifileindextests()`` to create an instance of this type.
44 44 """
45 45 def testempty(self):
46 46 f = self._makefilefn()
47 47 self.assertEqual(len(f), 0, 'new file store has 0 length by default')
48 48 self.assertEqual(list(f), [], 'iter yields nothing by default')
49 49
50 50 gen = iter(f)
51 51 with self.assertRaises(StopIteration):
52 52 next(gen)
53 53
54 54 # revs() should evaluate to an empty list.
55 55 self.assertEqual(list(f.revs()), [])
56 56
57 57 revs = iter(f.revs())
58 58 with self.assertRaises(StopIteration):
59 59 next(revs)
60 60
61 61 self.assertEqual(list(f.revs(start=20)), [])
62 62
63 63 # parents() and parentrevs() work with nullid/nullrev.
64 64 self.assertEqual(f.parents(nullid), (nullid, nullid))
65 65 self.assertEqual(f.parentrevs(nullrev), (nullrev, nullrev))
66 66
67 67 with self.assertRaises(error.LookupError):
68 68 f.parents(b'\x01' * 20)
69 69
70 70 for i in range(-5, 5):
71 71 if i == nullrev:
72 72 continue
73 73
74 74 with self.assertRaises(IndexError):
75 75 f.parentrevs(i)
76 76
77 77 # nullid/nullrev lookup always works.
78 78 self.assertEqual(f.rev(nullid), nullrev)
79 79 self.assertEqual(f.node(nullrev), nullid)
80 80
81 81 with self.assertRaises(error.LookupError):
82 82 f.rev(b'\x01' * 20)
83 83
84 84 for i in range(-5, 5):
85 85 if i == nullrev:
86 86 continue
87 87
88 88 with self.assertRaises(IndexError):
89 89 f.node(i)
90 90
91 91 self.assertEqual(f.lookup(nullid), nullid)
92 92 self.assertEqual(f.lookup(nullrev), nullid)
93 93 self.assertEqual(f.lookup(hex(nullid)), nullid)
94 94
95 95 # String converted to integer doesn't work for nullrev.
96 96 with self.assertRaises(error.LookupError):
97 97 f.lookup(b'%d' % nullrev)
98 98
99 99 self.assertEqual(f.linkrev(nullrev), nullrev)
100 100
101 101 for i in range(-5, 5):
102 102 if i == nullrev:
103 103 continue
104 104
105 105 with self.assertRaises(IndexError):
106 106 f.linkrev(i)
107 107
108 108 self.assertEqual(f.flags(nullrev), 0)
109 109
110 110 for i in range(-5, 5):
111 111 if i == nullrev:
112 112 continue
113 113
114 114 with self.assertRaises(IndexError):
115 115 f.flags(i)
116 116
117 117 self.assertFalse(f.iscensored(nullrev))
118 118
119 119 for i in range(-5, 5):
120 120 if i == nullrev:
121 121 continue
122 122
123 123 with self.assertRaises(IndexError):
124 124 f.iscensored(i)
125 125
126 126 self.assertEqual(list(f.commonancestorsheads(nullid, nullid)), [])
127 127
128 128 with self.assertRaises(ValueError):
129 129 self.assertEqual(list(f.descendants([])), [])
130 130
131 131 self.assertEqual(list(f.descendants([nullrev])), [])
132 132
133 self.assertEqual(f.headrevs(), [nullrev])
134 133 self.assertEqual(f.heads(), [nullid])
135 134 self.assertEqual(f.heads(nullid), [nullid])
136 135 self.assertEqual(f.heads(None, [nullid]), [nullid])
137 136 self.assertEqual(f.heads(nullid, [nullid]), [nullid])
138 137
139 138 self.assertEqual(f.children(nullid), [])
140 139
141 140 with self.assertRaises(error.LookupError):
142 141 f.children(b'\x01' * 20)
143 142
144 143 self.assertEqual(f.deltaparent(nullrev), nullrev)
145 144
146 145 for i in range(-5, 5):
147 146 if i == nullrev:
148 147 continue
149 148
150 149 with self.assertRaises(IndexError):
151 150 f.deltaparent(i)
152 151
153 152 def testsinglerevision(self):
154 153 f = self._makefilefn()
155 154 with self._maketransactionfn() as tr:
156 155 node = f.add(b'initial', None, tr, 0, nullid, nullid)
157 156
158 157 self.assertEqual(len(f), 1)
159 158 self.assertEqual(list(f), [0])
160 159
161 160 gen = iter(f)
162 161 self.assertEqual(next(gen), 0)
163 162
164 163 with self.assertRaises(StopIteration):
165 164 next(gen)
166 165
167 166 self.assertEqual(list(f.revs()), [0])
168 167 self.assertEqual(list(f.revs(start=1)), [])
169 168 self.assertEqual(list(f.revs(start=0)), [0])
170 169 self.assertEqual(list(f.revs(stop=0)), [0])
171 170 self.assertEqual(list(f.revs(stop=1)), [0])
172 171 self.assertEqual(list(f.revs(1, 1)), [])
173 172 # TODO buggy
174 173 self.assertEqual(list(f.revs(1, 0)), [1, 0])
175 174 self.assertEqual(list(f.revs(2, 0)), [2, 1, 0])
176 175
177 176 self.assertEqual(f.parents(node), (nullid, nullid))
178 177 self.assertEqual(f.parentrevs(0), (nullrev, nullrev))
179 178
180 179 with self.assertRaises(error.LookupError):
181 180 f.parents(b'\x01' * 20)
182 181
183 182 with self.assertRaises(IndexError):
184 183 f.parentrevs(1)
185 184
186 185 self.assertEqual(f.rev(node), 0)
187 186
188 187 with self.assertRaises(error.LookupError):
189 188 f.rev(b'\x01' * 20)
190 189
191 190 self.assertEqual(f.node(0), node)
192 191
193 192 with self.assertRaises(IndexError):
194 193 f.node(1)
195 194
196 195 self.assertEqual(f.lookup(node), node)
197 196 self.assertEqual(f.lookup(0), node)
198 197 self.assertEqual(f.lookup(b'0'), node)
199 198 self.assertEqual(f.lookup(hex(node)), node)
200 199
201 200 self.assertEqual(f.linkrev(0), 0)
202 201
203 202 with self.assertRaises(IndexError):
204 203 f.linkrev(1)
205 204
206 205 self.assertEqual(f.flags(0), 0)
207 206
208 207 with self.assertRaises(IndexError):
209 208 f.flags(1)
210 209
211 210 self.assertFalse(f.iscensored(0))
212 211
213 212 with self.assertRaises(IndexError):
214 213 f.iscensored(1)
215 214
216 215 self.assertEqual(list(f.descendants([0])), [])
217 216
218 self.assertEqual(f.headrevs(), [0])
219
220 217 self.assertEqual(f.heads(), [node])
221 218 self.assertEqual(f.heads(node), [node])
222 219 self.assertEqual(f.heads(stop=[node]), [node])
223 220
224 221 with self.assertRaises(error.LookupError):
225 222 f.heads(stop=[b'\x01' * 20])
226 223
227 224 self.assertEqual(f.children(node), [])
228 225
229 226 self.assertEqual(f.deltaparent(0), nullrev)
230 227
231 228 def testmultiplerevisions(self):
232 229 fulltext0 = b'x' * 1024
233 230 fulltext1 = fulltext0 + b'y'
234 231 fulltext2 = b'y' + fulltext0 + b'z'
235 232
236 233 f = self._makefilefn()
237 234 with self._maketransactionfn() as tr:
238 235 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
239 236 node1 = f.add(fulltext1, None, tr, 1, node0, nullid)
240 237 node2 = f.add(fulltext2, None, tr, 3, node1, nullid)
241 238
242 239 self.assertEqual(len(f), 3)
243 240 self.assertEqual(list(f), [0, 1, 2])
244 241
245 242 gen = iter(f)
246 243 self.assertEqual(next(gen), 0)
247 244 self.assertEqual(next(gen), 1)
248 245 self.assertEqual(next(gen), 2)
249 246
250 247 with self.assertRaises(StopIteration):
251 248 next(gen)
252 249
253 250 self.assertEqual(list(f.revs()), [0, 1, 2])
254 251 self.assertEqual(list(f.revs(0)), [0, 1, 2])
255 252 self.assertEqual(list(f.revs(1)), [1, 2])
256 253 self.assertEqual(list(f.revs(2)), [2])
257 254 self.assertEqual(list(f.revs(3)), [])
258 255 self.assertEqual(list(f.revs(stop=1)), [0, 1])
259 256 self.assertEqual(list(f.revs(stop=2)), [0, 1, 2])
260 257 self.assertEqual(list(f.revs(stop=3)), [0, 1, 2])
261 258 self.assertEqual(list(f.revs(2, 0)), [2, 1, 0])
262 259 self.assertEqual(list(f.revs(2, 1)), [2, 1])
263 260 # TODO this is wrong
264 261 self.assertEqual(list(f.revs(3, 2)), [3, 2])
265 262
266 263 self.assertEqual(f.parents(node0), (nullid, nullid))
267 264 self.assertEqual(f.parents(node1), (node0, nullid))
268 265 self.assertEqual(f.parents(node2), (node1, nullid))
269 266
270 267 self.assertEqual(f.parentrevs(0), (nullrev, nullrev))
271 268 self.assertEqual(f.parentrevs(1), (0, nullrev))
272 269 self.assertEqual(f.parentrevs(2), (1, nullrev))
273 270
274 271 self.assertEqual(f.rev(node0), 0)
275 272 self.assertEqual(f.rev(node1), 1)
276 273 self.assertEqual(f.rev(node2), 2)
277 274
278 275 with self.assertRaises(error.LookupError):
279 276 f.rev(b'\x01' * 20)
280 277
281 278 self.assertEqual(f.node(0), node0)
282 279 self.assertEqual(f.node(1), node1)
283 280 self.assertEqual(f.node(2), node2)
284 281
285 282 with self.assertRaises(IndexError):
286 283 f.node(3)
287 284
288 285 self.assertEqual(f.lookup(node0), node0)
289 286 self.assertEqual(f.lookup(0), node0)
290 287 self.assertEqual(f.lookup(b'0'), node0)
291 288 self.assertEqual(f.lookup(hex(node0)), node0)
292 289
293 290 self.assertEqual(f.lookup(node1), node1)
294 291 self.assertEqual(f.lookup(1), node1)
295 292 self.assertEqual(f.lookup(b'1'), node1)
296 293 self.assertEqual(f.lookup(hex(node1)), node1)
297 294
298 295 self.assertEqual(f.linkrev(0), 0)
299 296 self.assertEqual(f.linkrev(1), 1)
300 297 self.assertEqual(f.linkrev(2), 3)
301 298
302 299 with self.assertRaises(IndexError):
303 300 f.linkrev(3)
304 301
305 302 self.assertEqual(f.flags(0), 0)
306 303 self.assertEqual(f.flags(1), 0)
307 304 self.assertEqual(f.flags(2), 0)
308 305
309 306 with self.assertRaises(IndexError):
310 307 f.flags(3)
311 308
312 309 self.assertFalse(f.iscensored(0))
313 310 self.assertFalse(f.iscensored(1))
314 311 self.assertFalse(f.iscensored(2))
315 312
316 313 with self.assertRaises(IndexError):
317 314 f.iscensored(3)
318 315
319 316 self.assertEqual(f.commonancestorsheads(node1, nullid), [])
320 317 self.assertEqual(f.commonancestorsheads(node1, node0), [node0])
321 318 self.assertEqual(f.commonancestorsheads(node1, node1), [node1])
322 319 self.assertEqual(f.commonancestorsheads(node0, node1), [node0])
323 320 self.assertEqual(f.commonancestorsheads(node1, node2), [node1])
324 321 self.assertEqual(f.commonancestorsheads(node2, node1), [node1])
325 322
326 323 self.assertEqual(list(f.descendants([0])), [1, 2])
327 324 self.assertEqual(list(f.descendants([1])), [2])
328 325 self.assertEqual(list(f.descendants([0, 1])), [1, 2])
329 326
330 self.assertEqual(f.headrevs(), [2])
331
332 327 self.assertEqual(f.heads(), [node2])
333 328 self.assertEqual(f.heads(node0), [node2])
334 329 self.assertEqual(f.heads(node1), [node2])
335 330 self.assertEqual(f.heads(node2), [node2])
336 331
337 332 # TODO this behavior seems wonky. Is it correct? If so, the
338 333 # docstring for heads() should be updated to reflect desired
339 334 # behavior.
340 335 self.assertEqual(f.heads(stop=[node1]), [node1, node2])
341 336 self.assertEqual(f.heads(stop=[node0]), [node0, node2])
342 337 self.assertEqual(f.heads(stop=[node1, node2]), [node1, node2])
343 338
344 339 with self.assertRaises(error.LookupError):
345 340 f.heads(stop=[b'\x01' * 20])
346 341
347 342 self.assertEqual(f.children(node0), [node1])
348 343 self.assertEqual(f.children(node1), [node2])
349 344 self.assertEqual(f.children(node2), [])
350 345
351 346 self.assertEqual(f.deltaparent(0), nullrev)
352 347 self.assertEqual(f.deltaparent(1), 0)
353 348 self.assertEqual(f.deltaparent(2), 1)
354 349
355 350 def testmultipleheads(self):
356 351 f = self._makefilefn()
357 352
358 353 with self._maketransactionfn() as tr:
359 354 node0 = f.add(b'0', None, tr, 0, nullid, nullid)
360 355 node1 = f.add(b'1', None, tr, 1, node0, nullid)
361 356 node2 = f.add(b'2', None, tr, 2, node1, nullid)
362 357 node3 = f.add(b'3', None, tr, 3, node0, nullid)
363 358 node4 = f.add(b'4', None, tr, 4, node3, nullid)
364 359 node5 = f.add(b'5', None, tr, 5, node0, nullid)
365 360
366 361 self.assertEqual(len(f), 6)
367 362
368 363 self.assertEqual(list(f.descendants([0])), [1, 2, 3, 4, 5])
369 364 self.assertEqual(list(f.descendants([1])), [2])
370 365 self.assertEqual(list(f.descendants([2])), [])
371 366 self.assertEqual(list(f.descendants([3])), [4])
372 367 self.assertEqual(list(f.descendants([0, 1])), [1, 2, 3, 4, 5])
373 368 self.assertEqual(list(f.descendants([1, 3])), [2, 4])
374 369
375 self.assertEqual(f.headrevs(), [2, 4, 5])
376
377 370 self.assertEqual(f.heads(), [node2, node4, node5])
378 371 self.assertEqual(f.heads(node0), [node2, node4, node5])
379 372 self.assertEqual(f.heads(node1), [node2])
380 373 self.assertEqual(f.heads(node2), [node2])
381 374 self.assertEqual(f.heads(node3), [node4])
382 375 self.assertEqual(f.heads(node4), [node4])
383 376 self.assertEqual(f.heads(node5), [node5])
384 377
385 378 # TODO this seems wrong.
386 379 self.assertEqual(f.heads(stop=[node0]), [node0, node2, node4, node5])
387 380 self.assertEqual(f.heads(stop=[node1]), [node1, node2, node4, node5])
388 381
389 382 self.assertEqual(f.children(node0), [node1, node3, node5])
390 383 self.assertEqual(f.children(node1), [node2])
391 384 self.assertEqual(f.children(node2), [])
392 385 self.assertEqual(f.children(node3), [node4])
393 386 self.assertEqual(f.children(node4), [])
394 387 self.assertEqual(f.children(node5), [])
395 388
396 389 class ifiledatatests(basetestcase):
397 390 """Generic tests for the ifiledata interface.
398 391
399 392 All file storage backends for data should conform to the tests in this
400 393 class.
401 394
402 395 Use ``makeifiledatatests()`` to create an instance of this type.
403 396 """
404 397 def testempty(self):
405 398 f = self._makefilefn()
406 399
407 400 self.assertEqual(f.rawsize(nullrev), 0)
408 401
409 402 for i in range(-5, 5):
410 403 if i == nullrev:
411 404 continue
412 405
413 406 with self.assertRaises(IndexError):
414 407 f.rawsize(i)
415 408
416 409 self.assertEqual(f.size(nullrev), 0)
417 410
418 411 for i in range(-5, 5):
419 412 if i == nullrev:
420 413 continue
421 414
422 415 with self.assertRaises(IndexError):
423 416 f.size(i)
424 417
425 418 with self.assertRaises(error.StorageError):
426 419 f.checkhash(b'', nullid)
427 420
428 421 with self.assertRaises(error.LookupError):
429 422 f.checkhash(b'', b'\x01' * 20)
430 423
431 424 self.assertEqual(f.revision(nullid), b'')
432 425 self.assertEqual(f.revision(nullid, raw=True), b'')
433 426
434 427 with self.assertRaises(error.LookupError):
435 428 f.revision(b'\x01' * 20)
436 429
437 430 self.assertEqual(f.read(nullid), b'')
438 431
439 432 with self.assertRaises(error.LookupError):
440 433 f.read(b'\x01' * 20)
441 434
442 435 self.assertFalse(f.renamed(nullid))
443 436
444 437 with self.assertRaises(error.LookupError):
445 438 f.read(b'\x01' * 20)
446 439
447 440 self.assertTrue(f.cmp(nullid, b''))
448 441 self.assertTrue(f.cmp(nullid, b'foo'))
449 442
450 443 with self.assertRaises(error.LookupError):
451 444 f.cmp(b'\x01' * 20, b'irrelevant')
452 445
453 446 self.assertEqual(f.revdiff(nullrev, nullrev), b'')
454 447
455 448 with self.assertRaises(IndexError):
456 449 f.revdiff(0, nullrev)
457 450
458 451 with self.assertRaises(IndexError):
459 452 f.revdiff(nullrev, 0)
460 453
461 454 with self.assertRaises(IndexError):
462 455 f.revdiff(0, 0)
463 456
464 457 gen = f.emitrevisiondeltas([])
465 458 with self.assertRaises(StopIteration):
466 459 next(gen)
467 460
468 461 requests = [
469 462 revisiondeltarequest(nullid, nullid, nullid, nullid, nullid, False),
470 463 ]
471 464 gen = f.emitrevisiondeltas(requests)
472 465
473 466 delta = next(gen)
474 467
475 468 self.assertEqual(delta.node, nullid)
476 469 self.assertEqual(delta.p1node, nullid)
477 470 self.assertEqual(delta.p2node, nullid)
478 471 self.assertEqual(delta.linknode, nullid)
479 472 self.assertEqual(delta.basenode, nullid)
480 473 self.assertIsNone(delta.baserevisionsize)
481 474 self.assertEqual(delta.revision, b'')
482 475 self.assertIsNone(delta.delta)
483 476
484 477 with self.assertRaises(StopIteration):
485 478 next(gen)
486 479
487 480 requests = [
488 481 revisiondeltarequest(nullid, nullid, nullid, nullid, nullid, False),
489 482 revisiondeltarequest(nullid, b'\x01' * 20, b'\x02' * 20,
490 483 b'\x03' * 20, nullid, False)
491 484 ]
492 485
493 486 gen = f.emitrevisiondeltas(requests)
494 487
495 488 next(gen)
496 489 delta = next(gen)
497 490
498 491 self.assertEqual(delta.node, nullid)
499 492 self.assertEqual(delta.p1node, b'\x01' * 20)
500 493 self.assertEqual(delta.p2node, b'\x02' * 20)
501 494 self.assertEqual(delta.linknode, b'\x03' * 20)
502 495 self.assertEqual(delta.basenode, nullid)
503 496 self.assertIsNone(delta.baserevisionsize)
504 497 self.assertEqual(delta.revision, b'')
505 498 self.assertIsNone(delta.delta)
506 499
507 500 with self.assertRaises(StopIteration):
508 501 next(gen)
509 502
510 503 def testsinglerevision(self):
511 504 fulltext = b'initial'
512 505
513 506 f = self._makefilefn()
514 507 with self._maketransactionfn() as tr:
515 508 node = f.add(fulltext, None, tr, 0, nullid, nullid)
516 509
517 510 self.assertEqual(f.rawsize(0), len(fulltext))
518 511
519 512 with self.assertRaises(IndexError):
520 513 f.rawsize(1)
521 514
522 515 self.assertEqual(f.size(0), len(fulltext))
523 516
524 517 with self.assertRaises(IndexError):
525 518 f.size(1)
526 519
527 520 f.checkhash(fulltext, node)
528 521 f.checkhash(fulltext, node, nullid, nullid)
529 522
530 523 with self.assertRaises(error.StorageError):
531 524 f.checkhash(fulltext + b'extra', node)
532 525
533 526 with self.assertRaises(error.StorageError):
534 527 f.checkhash(fulltext, node, b'\x01' * 20, nullid)
535 528
536 529 with self.assertRaises(error.StorageError):
537 530 f.checkhash(fulltext, node, nullid, b'\x01' * 20)
538 531
539 532 self.assertEqual(f.revision(node), fulltext)
540 533 self.assertEqual(f.revision(node, raw=True), fulltext)
541 534
542 535 self.assertEqual(f.read(node), fulltext)
543 536
544 537 self.assertFalse(f.renamed(node))
545 538
546 539 self.assertFalse(f.cmp(node, fulltext))
547 540 self.assertTrue(f.cmp(node, fulltext + b'extra'))
548 541
549 542 self.assertEqual(f.revdiff(0, 0), b'')
550 543 self.assertEqual(f.revdiff(nullrev, 0),
551 544 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07%s' %
552 545 fulltext)
553 546
554 547 self.assertEqual(f.revdiff(0, nullrev),
555 548 b'\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00')
556 549
557 550 requests = [
558 551 revisiondeltarequest(node, nullid, nullid, nullid, nullid, False),
559 552 ]
560 553 gen = f.emitrevisiondeltas(requests)
561 554
562 555 delta = next(gen)
563 556
564 557 self.assertEqual(delta.node, node)
565 558 self.assertEqual(delta.p1node, nullid)
566 559 self.assertEqual(delta.p2node, nullid)
567 560 self.assertEqual(delta.linknode, nullid)
568 561 self.assertEqual(delta.basenode, nullid)
569 562 self.assertIsNone(delta.baserevisionsize)
570 563 self.assertEqual(delta.revision, fulltext)
571 564 self.assertIsNone(delta.delta)
572 565
573 566 with self.assertRaises(StopIteration):
574 567 next(gen)
575 568
576 569 def testmultiplerevisions(self):
577 570 fulltext0 = b'x' * 1024
578 571 fulltext1 = fulltext0 + b'y'
579 572 fulltext2 = b'y' + fulltext0 + b'z'
580 573
581 574 f = self._makefilefn()
582 575 with self._maketransactionfn() as tr:
583 576 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
584 577 node1 = f.add(fulltext1, None, tr, 1, node0, nullid)
585 578 node2 = f.add(fulltext2, None, tr, 3, node1, nullid)
586 579
587 580 self.assertEqual(f.rawsize(0), len(fulltext0))
588 581 self.assertEqual(f.rawsize(1), len(fulltext1))
589 582 self.assertEqual(f.rawsize(2), len(fulltext2))
590 583
591 584 with self.assertRaises(IndexError):
592 585 f.rawsize(3)
593 586
594 587 self.assertEqual(f.size(0), len(fulltext0))
595 588 self.assertEqual(f.size(1), len(fulltext1))
596 589 self.assertEqual(f.size(2), len(fulltext2))
597 590
598 591 with self.assertRaises(IndexError):
599 592 f.size(3)
600 593
601 594 f.checkhash(fulltext0, node0)
602 595 f.checkhash(fulltext1, node1)
603 596 f.checkhash(fulltext1, node1, node0, nullid)
604 597 f.checkhash(fulltext2, node2, node1, nullid)
605 598
606 599 with self.assertRaises(error.StorageError):
607 600 f.checkhash(fulltext1, b'\x01' * 20)
608 601
609 602 with self.assertRaises(error.StorageError):
610 603 f.checkhash(fulltext1 + b'extra', node1, node0, nullid)
611 604
612 605 with self.assertRaises(error.StorageError):
613 606 f.checkhash(fulltext1, node1, node0, node0)
614 607
615 608 self.assertEqual(f.revision(node0), fulltext0)
616 609 self.assertEqual(f.revision(node0, raw=True), fulltext0)
617 610 self.assertEqual(f.revision(node1), fulltext1)
618 611 self.assertEqual(f.revision(node1, raw=True), fulltext1)
619 612 self.assertEqual(f.revision(node2), fulltext2)
620 613 self.assertEqual(f.revision(node2, raw=True), fulltext2)
621 614
622 615 with self.assertRaises(error.LookupError):
623 616 f.revision(b'\x01' * 20)
624 617
625 618 self.assertEqual(f.read(node0), fulltext0)
626 619 self.assertEqual(f.read(node1), fulltext1)
627 620 self.assertEqual(f.read(node2), fulltext2)
628 621
629 622 with self.assertRaises(error.LookupError):
630 623 f.read(b'\x01' * 20)
631 624
632 625 self.assertFalse(f.renamed(node0))
633 626 self.assertFalse(f.renamed(node1))
634 627 self.assertFalse(f.renamed(node2))
635 628
636 629 with self.assertRaises(error.LookupError):
637 630 f.renamed(b'\x01' * 20)
638 631
639 632 self.assertFalse(f.cmp(node0, fulltext0))
640 633 self.assertFalse(f.cmp(node1, fulltext1))
641 634 self.assertFalse(f.cmp(node2, fulltext2))
642 635
643 636 self.assertTrue(f.cmp(node1, fulltext0))
644 637 self.assertTrue(f.cmp(node2, fulltext1))
645 638
646 639 with self.assertRaises(error.LookupError):
647 640 f.cmp(b'\x01' * 20, b'irrelevant')
648 641
649 642 self.assertEqual(f.revdiff(0, 1),
650 643 b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' +
651 644 fulltext1)
652 645
653 646 self.assertEqual(f.revdiff(0, 2),
654 647 b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x02' +
655 648 fulltext2)
656 649
657 650 requests = [
658 651 revisiondeltarequest(node0, nullid, nullid, b'\x01' * 20, nullid,
659 652 False),
660 653 revisiondeltarequest(node1, node0, nullid, b'\x02' * 20, node0,
661 654 False),
662 655 revisiondeltarequest(node2, node1, nullid, b'\x03' * 20, node1,
663 656 False),
664 657 ]
665 658 gen = f.emitrevisiondeltas(requests)
666 659
667 660 delta = next(gen)
668 661
669 662 self.assertEqual(delta.node, node0)
670 663 self.assertEqual(delta.p1node, nullid)
671 664 self.assertEqual(delta.p2node, nullid)
672 665 self.assertEqual(delta.linknode, b'\x01' * 20)
673 666 self.assertEqual(delta.basenode, nullid)
674 667 self.assertIsNone(delta.baserevisionsize)
675 668 self.assertEqual(delta.revision, fulltext0)
676 669 self.assertIsNone(delta.delta)
677 670
678 671 delta = next(gen)
679 672
680 673 self.assertEqual(delta.node, node1)
681 674 self.assertEqual(delta.p1node, node0)
682 675 self.assertEqual(delta.p2node, nullid)
683 676 self.assertEqual(delta.linknode, b'\x02' * 20)
684 677 self.assertEqual(delta.basenode, node0)
685 678 self.assertIsNone(delta.baserevisionsize)
686 679 self.assertIsNone(delta.revision)
687 680 self.assertEqual(delta.delta,
688 681 b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' +
689 682 fulltext1)
690 683
691 684 delta = next(gen)
692 685
693 686 self.assertEqual(delta.node, node2)
694 687 self.assertEqual(delta.p1node, node1)
695 688 self.assertEqual(delta.p2node, nullid)
696 689 self.assertEqual(delta.linknode, b'\x03' * 20)
697 690 self.assertEqual(delta.basenode, node1)
698 691 self.assertIsNone(delta.baserevisionsize)
699 692 self.assertIsNone(delta.revision)
700 693 self.assertEqual(delta.delta,
701 694 b'\x00\x00\x00\x00\x00\x00\x04\x01\x00\x00\x04\x02' +
702 695 fulltext2)
703 696
704 697 with self.assertRaises(StopIteration):
705 698 next(gen)
706 699
707 700 def testrenamed(self):
708 701 fulltext0 = b'foo'
709 702 fulltext1 = b'bar'
710 703 fulltext2 = b'baz'
711 704
712 705 meta1 = {
713 706 b'copy': b'source0',
714 707 b'copyrev': b'a' * 40,
715 708 }
716 709
717 710 meta2 = {
718 711 b'copy': b'source1',
719 712 b'copyrev': b'b' * 40,
720 713 }
721 714
722 715 stored1 = b''.join([
723 716 b'\x01\ncopy: source0\n',
724 717 b'copyrev: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n\x01\n',
725 718 fulltext1,
726 719 ])
727 720
728 721 stored2 = b''.join([
729 722 b'\x01\ncopy: source1\n',
730 723 b'copyrev: bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n\x01\n',
731 724 fulltext2,
732 725 ])
733 726
734 727 f = self._makefilefn()
735 728 with self._maketransactionfn() as tr:
736 729 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
737 730 node1 = f.add(fulltext1, meta1, tr, 1, node0, nullid)
738 731 node2 = f.add(fulltext2, meta2, tr, 2, nullid, nullid)
739 732
740 733 self.assertEqual(f.rawsize(1), len(stored1))
741 734 self.assertEqual(f.rawsize(2), len(stored2))
742 735
743 736 # Metadata header isn't recognized when parent isn't nullid.
744 737 self.assertEqual(f.size(1), len(stored1))
745 738 self.assertEqual(f.size(2), len(fulltext2))
746 739
747 740 self.assertEqual(f.revision(node1), stored1)
748 741 self.assertEqual(f.revision(node1, raw=True), stored1)
749 742 self.assertEqual(f.revision(node2), stored2)
750 743 self.assertEqual(f.revision(node2, raw=True), stored2)
751 744
752 745 self.assertEqual(f.read(node1), fulltext1)
753 746 self.assertEqual(f.read(node2), fulltext2)
754 747
755 748 # Returns False when first parent is set.
756 749 self.assertFalse(f.renamed(node1))
757 750 self.assertEqual(f.renamed(node2), (b'source1', b'\xbb' * 20))
758 751
759 752 self.assertTrue(f.cmp(node1, fulltext1))
760 753 self.assertTrue(f.cmp(node1, stored1))
761 754 self.assertFalse(f.cmp(node2, fulltext2))
762 755 self.assertTrue(f.cmp(node2, stored2))
763 756
764 757 def testmetadataprefix(self):
765 758 # Content with metadata prefix has extra prefix inserted in storage.
766 759 fulltext0 = b'\x01\nfoo'
767 760 stored0 = b'\x01\n\x01\n\x01\nfoo'
768 761
769 762 fulltext1 = b'\x01\nbar'
770 763 meta1 = {
771 764 b'copy': b'source0',
772 765 b'copyrev': b'b' * 40,
773 766 }
774 767 stored1 = b''.join([
775 768 b'\x01\ncopy: source0\n',
776 769 b'copyrev: %s\n' % (b'b' * 40),
777 770 b'\x01\n\x01\nbar',
778 771 ])
779 772
780 773 f = self._makefilefn()
781 774 with self._maketransactionfn() as tr:
782 775 node0 = f.add(fulltext0, {}, tr, 0, nullid, nullid)
783 776 node1 = f.add(fulltext1, meta1, tr, 1, nullid, nullid)
784 777
785 778 self.assertEqual(f.rawsize(0), len(stored0))
786 779 self.assertEqual(f.rawsize(1), len(stored1))
787 780
788 781 # TODO this is buggy.
789 782 self.assertEqual(f.size(0), len(fulltext0) + 4)
790 783
791 784 self.assertEqual(f.size(1), len(fulltext1))
792 785
793 786 self.assertEqual(f.revision(node0), stored0)
794 787 self.assertEqual(f.revision(node0, raw=True), stored0)
795 788
796 789 self.assertEqual(f.revision(node1), stored1)
797 790 self.assertEqual(f.revision(node1, raw=True), stored1)
798 791
799 792 self.assertEqual(f.read(node0), fulltext0)
800 793 self.assertEqual(f.read(node1), fulltext1)
801 794
802 795 self.assertFalse(f.cmp(node0, fulltext0))
803 796 self.assertTrue(f.cmp(node0, stored0))
804 797
805 798 self.assertFalse(f.cmp(node1, fulltext1))
806 799 self.assertTrue(f.cmp(node1, stored0))
807 800
808 801 def testcensored(self):
809 802 f = self._makefilefn()
810 803
811 804 stored1 = revlog.packmeta({
812 805 b'censored': b'tombstone',
813 806 }, b'')
814 807
815 808 # TODO tests are incomplete because we need the node to be
816 809 # different due to presence of censor metadata. But we can't
817 810 # do this with addrevision().
818 811 with self._maketransactionfn() as tr:
819 812 node0 = f.add(b'foo', None, tr, 0, nullid, nullid)
820 813 f.addrevision(stored1, tr, 1, node0, nullid,
821 814 flags=revlog.REVIDX_ISCENSORED)
822 815
823 816 self.assertEqual(f.flags(1), revlog.REVIDX_ISCENSORED)
824 817 self.assertTrue(f.iscensored(1))
825 818
826 819 self.assertEqual(f.revision(1), stored1)
827 820 self.assertEqual(f.revision(1, raw=True), stored1)
828 821
829 822 self.assertEqual(f.read(1), b'')
830 823
831 824 class ifilemutationtests(basetestcase):
832 825 """Generic tests for the ifilemutation interface.
833 826
834 827 All file storage backends that support writing should conform to this
835 828 interface.
836 829
837 830 Use ``makeifilemutationtests()`` to create an instance of this type.
838 831 """
839 832 def testaddnoop(self):
840 833 f = self._makefilefn()
841 834 with self._maketransactionfn() as tr:
842 835 node0 = f.add(b'foo', None, tr, 0, nullid, nullid)
843 836 node1 = f.add(b'foo', None, tr, 0, nullid, nullid)
844 837 # Varying by linkrev shouldn't impact hash.
845 838 node2 = f.add(b'foo', None, tr, 1, nullid, nullid)
846 839
847 840 self.assertEqual(node1, node0)
848 841 self.assertEqual(node2, node0)
849 842 self.assertEqual(len(f), 1)
850 843
851 844 def testaddrevisionbadnode(self):
852 845 f = self._makefilefn()
853 846 with self._maketransactionfn() as tr:
854 847 # Adding a revision with bad node value fails.
855 848 with self.assertRaises(error.StorageError):
856 849 f.addrevision(b'foo', tr, 0, nullid, nullid, node=b'\x01' * 20)
857 850
858 851 def testaddrevisionunknownflag(self):
859 852 f = self._makefilefn()
860 853 with self._maketransactionfn() as tr:
861 854 for i in range(15, 0, -1):
862 855 if (1 << i) & ~revlog.REVIDX_KNOWN_FLAGS:
863 856 flags = 1 << i
864 857 break
865 858
866 859 with self.assertRaises(error.StorageError):
867 860 f.addrevision(b'foo', tr, 0, nullid, nullid, flags=flags)
868 861
869 862 def testaddgroupsimple(self):
870 863 f = self._makefilefn()
871 864
872 865 callbackargs = []
873 866 def cb(*args, **kwargs):
874 867 callbackargs.append((args, kwargs))
875 868
876 869 def linkmapper(node):
877 870 return 0
878 871
879 872 with self._maketransactionfn() as tr:
880 873 nodes = f.addgroup([], None, tr, addrevisioncb=cb)
881 874
882 875 self.assertEqual(nodes, [])
883 876 self.assertEqual(callbackargs, [])
884 877 self.assertEqual(len(f), 0)
885 878
886 879 fulltext0 = b'foo'
887 880 delta0 = mdiff.trivialdiffheader(len(fulltext0)) + fulltext0
888 881
889 882 deltas = [
890 883 (b'\x01' * 20, nullid, nullid, nullid, nullid, delta0, 0),
891 884 ]
892 885
893 886 with self._maketransactionfn() as tr:
894 887 with self.assertRaises(error.StorageError):
895 888 f.addgroup(deltas, linkmapper, tr, addrevisioncb=cb)
896 889
897 890 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
898 891
899 892 f = self._makefilefn()
900 893
901 894 deltas = [
902 895 (node0, nullid, nullid, nullid, nullid, delta0, 0),
903 896 ]
904 897
905 898 with self._maketransactionfn() as tr:
906 899 nodes = f.addgroup(deltas, linkmapper, tr, addrevisioncb=cb)
907 900
908 901 self.assertEqual(nodes, [
909 902 b'\x49\xd8\xcb\xb1\x5c\xe2\x57\x92\x04\x47'
910 903 b'\x00\x6b\x46\x97\x8b\x7a\xf9\x80\xa9\x79'])
911 904
912 905 self.assertEqual(len(callbackargs), 1)
913 906 self.assertEqual(callbackargs[0][0][1], nodes[0])
914 907
915 908 self.assertEqual(list(f.revs()), [0])
916 909 self.assertEqual(f.rev(nodes[0]), 0)
917 910 self.assertEqual(f.node(0), nodes[0])
918 911
919 912 def testaddgroupmultiple(self):
920 913 f = self._makefilefn()
921 914
922 915 fulltexts = [
923 916 b'foo',
924 917 b'bar',
925 918 b'x' * 1024,
926 919 ]
927 920
928 921 nodes = []
929 922 with self._maketransactionfn() as tr:
930 923 for fulltext in fulltexts:
931 924 nodes.append(f.add(fulltext, None, tr, 0, nullid, nullid))
932 925
933 926 f = self._makefilefn()
934 927 deltas = []
935 928 for i, fulltext in enumerate(fulltexts):
936 929 delta = mdiff.trivialdiffheader(len(fulltext)) + fulltext
937 930
938 931 deltas.append((nodes[i], nullid, nullid, nullid, nullid, delta, 0))
939 932
940 933 with self._maketransactionfn() as tr:
941 934 self.assertEqual(f.addgroup(deltas, lambda x: 0, tr), nodes)
942 935
943 936 self.assertEqual(len(f), len(deltas))
944 937 self.assertEqual(list(f.revs()), [0, 1, 2])
945 938 self.assertEqual(f.rev(nodes[0]), 0)
946 939 self.assertEqual(f.rev(nodes[1]), 1)
947 940 self.assertEqual(f.rev(nodes[2]), 2)
948 941 self.assertEqual(f.node(0), nodes[0])
949 942 self.assertEqual(f.node(1), nodes[1])
950 943 self.assertEqual(f.node(2), nodes[2])
951 944
952 945 def makeifileindextests(makefilefn, maketransactionfn):
953 946 """Create a unittest.TestCase class suitable for testing file storage.
954 947
955 948 ``makefilefn`` is a callable which receives the test case as an
956 949 argument and returns an object implementing the ``ifilestorage`` interface.
957 950
958 951 ``maketransactionfn`` is a callable which receives the test case as an
959 952 argument and returns a transaction object.
960 953
961 954 Returns a type that is a ``unittest.TestCase`` that can be used for
962 955 testing the object implementing the file storage interface. Simply
963 956 assign the returned value to a module-level attribute and a test loader
964 957 should find and run it automatically.
965 958 """
966 959 d = {
967 960 r'_makefilefn': makefilefn,
968 961 r'_maketransactionfn': maketransactionfn,
969 962 }
970 963 return type(r'ifileindextests', (ifileindextests,), d)
971 964
972 965 def makeifiledatatests(makefilefn, maketransactionfn):
973 966 d = {
974 967 r'_makefilefn': makefilefn,
975 968 r'_maketransactionfn': maketransactionfn,
976 969 }
977 970 return type(r'ifiledatatests', (ifiledatatests,), d)
978 971
979 972 def makeifilemutationtests(makefilefn, maketransactionfn):
980 973 d = {
981 974 r'_makefilefn': makefilefn,
982 975 r'_maketransactionfn': maketransactionfn,
983 976 }
984 977 return type(r'ifilemutationtests', (ifilemutationtests,), d)
@@ -1,753 +1,741
1 1 # simplestorerepo.py - Extension that swaps in alternate repository storage.
2 2 #
3 3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 # To use this with the test suite:
9 9 #
10 10 # $ HGREPOFEATURES="simplestore" ./run-tests.py \
11 11 # --extra-config-opt extensions.simplestore=`pwd`/simplestorerepo.py
12 12
13 13 from __future__ import absolute_import
14 14
15 15 import stat
16 16
17 17 from mercurial.i18n import _
18 18 from mercurial.node import (
19 19 bin,
20 20 hex,
21 21 nullid,
22 22 nullrev,
23 23 )
24 24 from mercurial.thirdparty import (
25 25 attr,
26 26 cbor,
27 27 )
28 28 from mercurial import (
29 29 ancestor,
30 30 bundlerepo,
31 31 error,
32 32 extensions,
33 33 localrepo,
34 34 mdiff,
35 35 pycompat,
36 36 repository,
37 37 revlog,
38 38 store,
39 39 verify,
40 40 )
41 41 from mercurial.utils import (
42 42 interfaceutil,
43 43 )
44 44
45 45 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
46 46 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
47 47 # be specifying the version(s) of Mercurial they are tested with, or
48 48 # leave the attribute unspecified.
49 49 testedwith = 'ships-with-hg-core'
50 50
51 51 REQUIREMENT = 'testonly-simplestore'
52 52
def validatenode(node):
    """Raise ``ValueError`` unless ``node`` looks like a 20-byte node.

    Guards against the common mistake of passing a revision number
    (an int) where a binary node is expected.
    """
    if isinstance(node, int):
        raise ValueError('expected node; got int')
    if len(node) != 20:
        raise ValueError('expected 20 byte node')
59 59
def validaterev(rev):
    """Raise ``ValueError`` unless ``rev`` is an integer revision number."""
    if not isinstance(rev, int):
        raise ValueError('expected int')
63 63
class simplestoreerror(error.StorageError):
    """Storage-layer error raised by the simple store."""
66 66
@interfaceutil.implementer(repository.irevisiondelta)
@attr.s(slots=True, frozen=True)
class simplestorerevisiondelta(object):
    """Immutable value object describing one emitted revision delta.

    Mirrors ``repository.irevisiondelta``. Exactly one of ``revision``
    (a fulltext) or ``delta`` (a diff against ``basenode``) is
    expected to be populated by the emitter.
    """
    # Attribute order is significant: it defines the attrs-generated
    # __init__ signature.
    node = attr.ib()
    p1node = attr.ib()
    p2node = attr.ib()
    basenode = attr.ib()
    linknode = attr.ib()
    flags = attr.ib()
    baserevisionsize = attr.ib()
    revision = attr.ib()
    delta = attr.ib()
79 79
80 80 @interfaceutil.implementer(repository.ifilestorage)
81 81 class filestorage(object):
82 82 """Implements storage for a tracked path.
83 83
84 84 Data is stored in the VFS in a directory corresponding to the tracked
85 85 path.
86 86
87 87 Index data is stored in an ``index`` file using CBOR.
88 88
89 89 Fulltext data is stored in files having names of the node.
90 90 """
91 91
92 92 def __init__(self, svfs, path):
93 93 self._svfs = svfs
94 94 self._path = path
95 95
96 96 self._storepath = b'/'.join([b'data', path])
97 97 self._indexpath = b'/'.join([self._storepath, b'index'])
98 98
99 99 indexdata = self._svfs.tryread(self._indexpath)
100 100 if indexdata:
101 101 indexdata = cbor.loads(indexdata)
102 102
103 103 self._indexdata = indexdata or []
104 104 self._indexbynode = {}
105 105 self._indexbyrev = {}
106 106 self.index = []
107 107 self._refreshindex()
108 108
109 109 # This is used by changegroup code :/
110 110 self._generaldelta = True
111 111
112 112 self.version = 1
113 113
114 114 def _refreshindex(self):
115 115 self._indexbynode.clear()
116 116 self._indexbyrev.clear()
117 117 self.index = []
118 118
119 119 for i, entry in enumerate(self._indexdata):
120 120 self._indexbynode[entry[b'node']] = entry
121 121 self._indexbyrev[i] = entry
122 122
123 123 self._indexbynode[nullid] = {
124 124 b'node': nullid,
125 125 b'p1': nullid,
126 126 b'p2': nullid,
127 127 b'linkrev': nullrev,
128 128 b'flags': 0,
129 129 }
130 130
131 131 self._indexbyrev[nullrev] = {
132 132 b'node': nullid,
133 133 b'p1': nullid,
134 134 b'p2': nullid,
135 135 b'linkrev': nullrev,
136 136 b'flags': 0,
137 137 }
138 138
139 139 for i, entry in enumerate(self._indexdata):
140 140 p1rev, p2rev = self.parentrevs(self.rev(entry[b'node']))
141 141
142 142 # start, length, rawsize, chainbase, linkrev, p1, p2, node
143 143 self.index.append((0, 0, 0, -1, entry[b'linkrev'], p1rev, p2rev,
144 144 entry[b'node']))
145 145
146 146 self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))
147 147
148 148 def __len__(self):
149 149 return len(self._indexdata)
150 150
151 151 def __iter__(self):
152 152 return iter(range(len(self)))
153 153
154 154 def revs(self, start=0, stop=None):
155 155 step = 1
156 156 if stop is not None:
157 157 if start > stop:
158 158 step = -1
159 159
160 160 stop += step
161 161 else:
162 162 stop = len(self)
163 163
164 164 return range(start, stop, step)
165 165
166 166 def parents(self, node):
167 167 validatenode(node)
168 168
169 169 if node not in self._indexbynode:
170 170 raise KeyError('unknown node')
171 171
172 172 entry = self._indexbynode[node]
173 173
174 174 return entry[b'p1'], entry[b'p2']
175 175
176 176 def parentrevs(self, rev):
177 177 p1, p2 = self.parents(self._indexbyrev[rev][b'node'])
178 178 return self.rev(p1), self.rev(p2)
179 179
180 180 def rev(self, node):
181 181 validatenode(node)
182 182
183 183 try:
184 184 self._indexbynode[node]
185 185 except KeyError:
186 186 raise error.LookupError(node, self._indexpath, _('no node'))
187 187
188 188 for rev, entry in self._indexbyrev.items():
189 189 if entry[b'node'] == node:
190 190 return rev
191 191
192 192 raise error.ProgrammingError('this should not occur')
193 193
194 194 def node(self, rev):
195 195 validaterev(rev)
196 196
197 197 return self._indexbyrev[rev][b'node']
198 198
199 199 def lookup(self, node):
200 200 if isinstance(node, int):
201 201 return self.node(node)
202 202
203 203 if len(node) == 20:
204 204 self.rev(node)
205 205 return node
206 206
207 207 try:
208 208 rev = int(node)
209 209 if '%d' % rev != node:
210 210 raise ValueError
211 211
212 212 if rev < 0:
213 213 rev = len(self) + rev
214 214 if rev < 0 or rev >= len(self):
215 215 raise ValueError
216 216
217 217 return self.node(rev)
218 218 except (ValueError, OverflowError):
219 219 pass
220 220
221 221 if len(node) == 40:
222 222 try:
223 223 rawnode = bin(node)
224 224 self.rev(rawnode)
225 225 return rawnode
226 226 except TypeError:
227 227 pass
228 228
229 229 raise error.LookupError(node, self._path, _('invalid lookup input'))
230 230
231 231 def linkrev(self, rev):
232 232 validaterev(rev)
233 233
234 234 return self._indexbyrev[rev][b'linkrev']
235 235
236 236 def flags(self, rev):
237 237 validaterev(rev)
238 238
239 239 return self._indexbyrev[rev][b'flags']
240 240
241 241 def deltaparent(self, rev):
242 242 validaterev(rev)
243 243
244 244 p1node = self.parents(self.node(rev))[0]
245 245 return self.rev(p1node)
246 246
247 247 def _candelta(self, baserev, rev):
248 248 validaterev(baserev)
249 249 validaterev(rev)
250 250
251 251 if ((self.flags(baserev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)
252 252 or (self.flags(rev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)):
253 253 return False
254 254
255 255 return True
256 256
257 257 def rawsize(self, rev):
258 258 validaterev(rev)
259 259 node = self.node(rev)
260 260 return len(self.revision(node, raw=True))
261 261
262 262 def _processflags(self, text, flags, operation, raw=False):
263 263 if flags == 0:
264 264 return text, True
265 265
266 266 if flags & ~revlog.REVIDX_KNOWN_FLAGS:
267 267 raise simplestoreerror(_("incompatible revision flag '%#x'") %
268 268 (flags & ~revlog.REVIDX_KNOWN_FLAGS))
269 269
270 270 validatehash = True
271 271 # Depending on the operation (read or write), the order might be
272 272 # reversed due to non-commutative transforms.
273 273 orderedflags = revlog.REVIDX_FLAGS_ORDER
274 274 if operation == 'write':
275 275 orderedflags = reversed(orderedflags)
276 276
277 277 for flag in orderedflags:
278 278 # If a flagprocessor has been registered for a known flag, apply the
279 279 # related operation transform and update result tuple.
280 280 if flag & flags:
281 281 vhash = True
282 282
283 283 if flag not in revlog._flagprocessors:
284 284 message = _("missing processor for flag '%#x'") % (flag)
285 285 raise simplestoreerror(message)
286 286
287 287 processor = revlog._flagprocessors[flag]
288 288 if processor is not None:
289 289 readtransform, writetransform, rawtransform = processor
290 290
291 291 if raw:
292 292 vhash = rawtransform(self, text)
293 293 elif operation == 'read':
294 294 text, vhash = readtransform(self, text)
295 295 else: # write operation
296 296 text, vhash = writetransform(self, text)
297 297 validatehash = validatehash and vhash
298 298
299 299 return text, validatehash
300 300
301 301 def checkhash(self, text, node, p1=None, p2=None, rev=None):
302 302 if p1 is None and p2 is None:
303 303 p1, p2 = self.parents(node)
304 304 if node != revlog.hash(text, p1, p2):
305 305 raise simplestoreerror(_("integrity check failed on %s") %
306 306 self._path)
307 307
308 308 def revision(self, node, raw=False):
309 309 validatenode(node)
310 310
311 311 if node == nullid:
312 312 return b''
313 313
314 314 rev = self.rev(node)
315 315 flags = self.flags(rev)
316 316
317 317 path = b'/'.join([self._storepath, hex(node)])
318 318 rawtext = self._svfs.read(path)
319 319
320 320 text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
321 321 if validatehash:
322 322 self.checkhash(text, node, rev=rev)
323 323
324 324 return text
325 325
326 326 def read(self, node):
327 327 validatenode(node)
328 328
329 329 revision = self.revision(node)
330 330
331 331 if not revision.startswith(b'\1\n'):
332 332 return revision
333 333
334 334 start = revision.index(b'\1\n', 2)
335 335 return revision[start + 2:]
336 336
337 337 def renamed(self, node):
338 338 validatenode(node)
339 339
340 340 if self.parents(node)[0] != nullid:
341 341 return False
342 342
343 343 fulltext = self.revision(node)
344 344 m = revlog.parsemeta(fulltext)[0]
345 345
346 346 if m and 'copy' in m:
347 347 return m['copy'], bin(m['copyrev'])
348 348
349 349 return False
350 350
351 351 def cmp(self, node, text):
352 352 validatenode(node)
353 353
354 354 t = text
355 355
356 356 if text.startswith(b'\1\n'):
357 357 t = b'\1\n\1\n' + text
358 358
359 359 p1, p2 = self.parents(node)
360 360
361 361 if revlog.hash(t, p1, p2) == node:
362 362 return False
363 363
364 364 if self.iscensored(self.rev(node)):
365 365 return text != b''
366 366
367 367 if self.renamed(node):
368 368 t2 = self.read(node)
369 369 return t2 != text
370 370
371 371 return True
372 372
373 373 def size(self, rev):
374 374 validaterev(rev)
375 375
376 376 node = self._indexbyrev[rev][b'node']
377 377
378 378 if self.renamed(node):
379 379 return len(self.read(node))
380 380
381 381 if self.iscensored(rev):
382 382 return 0
383 383
384 384 return len(self.revision(node))
385 385
386 386 def iscensored(self, rev):
387 387 validaterev(rev)
388 388
389 389 return self.flags(rev) & revlog.REVIDX_ISCENSORED
390 390
391 391 def commonancestorsheads(self, a, b):
392 392 validatenode(a)
393 393 validatenode(b)
394 394
395 395 a = self.rev(a)
396 396 b = self.rev(b)
397 397
398 398 ancestors = ancestor.commonancestorsheads(self.parentrevs, a, b)
399 399 return pycompat.maplist(self.node, ancestors)
400 400
401 401 def descendants(self, revs):
402 402 # This is a copy of revlog.descendants()
403 403 first = min(revs)
404 404 if first == nullrev:
405 405 for i in self:
406 406 yield i
407 407 return
408 408
409 409 seen = set(revs)
410 410 for i in self.revs(start=first + 1):
411 411 for x in self.parentrevs(i):
412 412 if x != nullrev and x in seen:
413 413 seen.add(i)
414 414 yield i
415 415 break
416 416
417 417 # Required by verify.
418 418 def files(self):
419 419 entries = self._svfs.listdir(self._storepath)
420 420
421 421 # Strip out undo.backup.* files created as part of transaction
422 422 # recording.
423 423 entries = [f for f in entries if not f.startswith('undo.backup.')]
424 424
425 425 return [b'/'.join((self._storepath, f)) for f in entries]
426 426
427 427 # Required by verify.
428 428 def checksize(self):
429 429 return 0, 0
430 430
431 431 def add(self, text, meta, transaction, linkrev, p1, p2):
432 432 if meta or text.startswith(b'\1\n'):
433 433 text = revlog.packmeta(meta, text)
434 434
435 435 return self.addrevision(text, transaction, linkrev, p1, p2)
436 436
437 437 def addrevision(self, text, transaction, linkrev, p1, p2, node=None,
438 438 flags=revlog.REVIDX_DEFAULT_FLAGS, cachedelta=None):
439 439 validatenode(p1)
440 440 validatenode(p2)
441 441
442 442 if flags:
443 443 node = node or revlog.hash(text, p1, p2)
444 444
445 445 rawtext, validatehash = self._processflags(text, flags, 'write')
446 446
447 447 node = node or revlog.hash(text, p1, p2)
448 448
449 449 if node in self._indexbynode:
450 450 return node
451 451
452 452 if validatehash:
453 453 self.checkhash(rawtext, node, p1=p1, p2=p2)
454 454
455 455 return self._addrawrevision(node, rawtext, transaction, linkrev, p1, p2,
456 456 flags)
457 457
458 458 def _addrawrevision(self, node, rawtext, transaction, link, p1, p2, flags):
459 459 transaction.addbackup(self._indexpath)
460 460
461 461 path = b'/'.join([self._storepath, hex(node)])
462 462
463 463 self._svfs.write(path, rawtext)
464 464
465 465 self._indexdata.append({
466 466 b'node': node,
467 467 b'p1': p1,
468 468 b'p2': p2,
469 469 b'linkrev': link,
470 470 b'flags': flags,
471 471 })
472 472
473 473 self._reflectindexupdate()
474 474
475 475 return node
476 476
477 477 def _reflectindexupdate(self):
478 478 self._refreshindex()
479 479 self._svfs.write(self._indexpath, cbor.dumps(self._indexdata))
480 480
481 481 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
482 482 nodes = []
483 483
484 484 transaction.addbackup(self._indexpath)
485 485
486 486 for node, p1, p2, linknode, deltabase, delta, flags in deltas:
487 487 linkrev = linkmapper(linknode)
488 488 flags = flags or revlog.REVIDX_DEFAULT_FLAGS
489 489
490 490 nodes.append(node)
491 491
492 492 if node in self._indexbynode:
493 493 continue
494 494
495 495 # Need to resolve the fulltext from the delta base.
496 496 if deltabase == nullid:
497 497 text = mdiff.patch(b'', delta)
498 498 else:
499 499 text = mdiff.patch(self.revision(deltabase), delta)
500 500
501 501 self._addrawrevision(node, text, transaction, linkrev, p1, p2,
502 502 flags)
503 503
504 504 if addrevisioncb:
505 505 addrevisioncb(self, node)
506 506
507 507 return nodes
508 508
509 509 def revdiff(self, rev1, rev2):
510 510 validaterev(rev1)
511 511 validaterev(rev2)
512 512
513 513 node1 = self.node(rev1)
514 514 node2 = self.node(rev2)
515 515
516 516 return mdiff.textdiff(self.revision(node1, raw=True),
517 517 self.revision(node2, raw=True))
518 518
519 519 def emitrevisiondeltas(self, requests):
520 520 for request in requests:
521 521 node = request.node
522 522 rev = self.rev(node)
523 523
524 524 if request.basenode == nullid:
525 525 baserev = nullrev
526 526 elif request.basenode is not None:
527 527 baserev = self.rev(request.basenode)
528 528 else:
529 529 # This is a test extension and we can do simple things
530 530 # for choosing a delta parent.
531 531 baserev = self.deltaparent(rev)
532 532
533 533 if baserev != nullrev and not self._candelta(baserev, rev):
534 534 baserev = nullrev
535 535
536 536 revision = None
537 537 delta = None
538 538 baserevisionsize = None
539 539
540 540 if self.iscensored(baserev) or self.iscensored(rev):
541 541 try:
542 542 revision = self.revision(node, raw=True)
543 543 except error.CensoredNodeError as e:
544 544 revision = e.tombstone
545 545
546 546 if baserev != nullrev:
547 547 baserevisionsize = self.rawsize(baserev)
548 548
549 549 elif baserev == nullrev:
550 550 revision = self.revision(node, raw=True)
551 551 else:
552 552 delta = self.revdiff(baserev, rev)
553 553
554 554 extraflags = revlog.REVIDX_ELLIPSIS if request.ellipsis else 0
555 555
556 556 yield simplestorerevisiondelta(
557 557 node=node,
558 558 p1node=request.p1node,
559 559 p2node=request.p2node,
560 560 linknode=request.linknode,
561 561 basenode=self.node(baserev),
562 562 flags=self.flags(rev) | extraflags,
563 563 baserevisionsize=baserevisionsize,
564 564 revision=revision,
565 565 delta=delta)
566 566
567 def headrevs(self):
568 # Assume all revisions are heads by default.
569 revishead = {rev: True for rev in self._indexbyrev}
570
571 for rev, entry in self._indexbyrev.items():
572 # Unset head flag for all seen parents.
573 revishead[self.rev(entry[b'p1'])] = False
574 revishead[self.rev(entry[b'p2'])] = False
575
576 return [rev for rev, ishead in sorted(revishead.items())
577 if ishead]
578
579 567 def heads(self, start=None, stop=None):
580 568 # This is copied from revlog.py.
581 569 if start is None and stop is None:
582 570 if not len(self):
583 571 return [nullid]
584 572 return [self.node(r) for r in self.headrevs()]
585 573
586 574 if start is None:
587 575 start = nullid
588 576 if stop is None:
589 577 stop = []
590 578 stoprevs = set([self.rev(n) for n in stop])
591 579 startrev = self.rev(start)
592 580 reachable = {startrev}
593 581 heads = {startrev}
594 582
595 583 parentrevs = self.parentrevs
596 584 for r in self.revs(start=startrev + 1):
597 585 for p in parentrevs(r):
598 586 if p in reachable:
599 587 if r not in stoprevs:
600 588 reachable.add(r)
601 589 heads.add(r)
602 590 if p in heads and p not in stoprevs:
603 591 heads.remove(p)
604 592
605 593 return [self.node(r) for r in heads]
606 594
607 595 def children(self, node):
608 596 validatenode(node)
609 597
610 598 # This is a copy of revlog.children().
611 599 c = []
612 600 p = self.rev(node)
613 601 for r in self.revs(start=p + 1):
614 602 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
615 603 if prevs:
616 604 for pr in prevs:
617 605 if pr == p:
618 606 c.append(self.node(r))
619 607 elif p == nullrev:
620 608 c.append(self.node(r))
621 609 return c
622 610
623 611 def getstrippoint(self, minlink):
624 612
625 613 # This is largely a copy of revlog.getstrippoint().
626 614 brokenrevs = set()
627 615 strippoint = len(self)
628 616
629 617 heads = {}
630 618 futurelargelinkrevs = set()
631 for head in self.headrevs():
632 headlinkrev = self.linkrev(head)
619 for head in self.heads():
620 headlinkrev = self.linkrev(self.rev(head))
633 621 heads[head] = headlinkrev
634 622 if headlinkrev >= minlink:
635 623 futurelargelinkrevs.add(headlinkrev)
636 624
637 625 # This algorithm involves walking down the rev graph, starting at the
638 626 # heads. Since the revs are topologically sorted according to linkrev,
639 627 # once all head linkrevs are below the minlink, we know there are
640 628 # no more revs that could have a linkrev greater than minlink.
641 629 # So we can stop walking.
642 630 while futurelargelinkrevs:
643 631 strippoint -= 1
644 632 linkrev = heads.pop(strippoint)
645 633
646 634 if linkrev < minlink:
647 635 brokenrevs.add(strippoint)
648 636 else:
649 637 futurelargelinkrevs.remove(linkrev)
650 638
651 639 for p in self.parentrevs(strippoint):
652 640 if p != nullrev:
653 641 plinkrev = self.linkrev(p)
654 642 heads[p] = plinkrev
655 643 if plinkrev >= minlink:
656 644 futurelargelinkrevs.add(plinkrev)
657 645
658 646 return strippoint, brokenrevs
659 647
660 648 def strip(self, minlink, transaction):
661 649 if not len(self):
662 650 return
663 651
664 652 rev, _ignored = self.getstrippoint(minlink)
665 653 if rev == len(self):
666 654 return
667 655
668 656 # Purge index data starting at the requested revision.
669 657 self._indexdata[rev:] = []
670 658 self._reflectindexupdate()
671 659
def issimplestorefile(f, kind, st):
    """Return True if ``f`` is a regular file belonging to the simple store.

    Revlogs and transaction undo files are excluded.
    """
    if kind != stat.S_IFREG or store.isrevlog(f, kind, st):
        return False

    # Ignore transaction undo files; everything else is assumed to
    # belong to the simple store.
    return not f.startswith('undo.')
685 673
class simplestore(store.encodedstore):
    """Encoded store that also reports the simple store's extra files."""

    def datafiles(self):
        for entry in super(simplestore, self).datafiles():
            yield entry

        # Supplement with non-revlog files.
        extrafiles = self._walk('data', True, filefilter=issimplestorefile)

        for unencoded, encoded, size in extrafiles:
            try:
                unencoded = store.decodefilename(unencoded)
            except KeyError:
                # Undecodable names are reported with a None unencoded
                # form, matching upstream store behavior.
                unencoded = None

            yield unencoded, encoded, size
701 689
def reposetup(ui, repo):
    """Swap the repository's file storage for the simple store."""
    if not repo.local():
        return

    # Bundle repos have their own storage layering we can't wrap.
    if isinstance(repo, bundlerepo.bundlerepository):
        raise error.Abort(_('cannot use simple store with bundlerepo'))

    class simplestorerepo(repo.__class__):
        def file(self, f):
            return filestorage(self.svfs, f)

    repo.__class__ = simplestorerepo
714 702
def featuresetup(ui, supported):
    """Advertise the simple store requirement as supported."""
    supported.add(REQUIREMENT)
717 705
def newreporequirements(orig, ui):
    """Modifies default requirements for new repos to use the simple store."""
    requirements = orig(ui)

    # These requirements are only used to affect creation of the store
    # object. We have our own store. So we can remove them.
    # TODO do this once we feel like taking the test hit.
    #if 'fncache' in requirements:
    #    requirements.remove('fncache')
    #if 'dotencode' in requirements:
    #    requirements.remove('dotencode')

    requirements.add(REQUIREMENT)

    return requirements
733 721
def makestore(orig, requirements, path, vfstype):
    """Create a ``simplestore`` when the test requirement is present."""
    if REQUIREMENT in requirements:
        return simplestore(path, vfstype)

    return orig(requirements, path, vfstype)
739 727
def verifierinit(orig, self, *args, **kwargs):
    """Wrap verifier.__init__ to silence orphan-store-file warnings."""
    orig(self, *args, **kwargs)

    # We don't care that files in the store don't align with what is
    # advertised. So suppress these warnings.
    self.warnorphanstorefiles = False
746 734
def extsetup(ui):
    """Register the wrappers hooking the simple store into core."""
    localrepo.featuresetupfuncs.add(featuresetup)

    extensions.wrapfunction(localrepo, 'newreporequirements',
                            newreporequirements)
    extensions.wrapfunction(store, 'store', makestore)
    extensions.wrapfunction(verify.verifier, '__init__', verifierinit)
General Comments 0
You need to be logged in to leave comments. Login now