tests: synchronize `simplestorerevisiondelta` with modern `irevisiondelta`...
Matt Harbison
r53368:4a332b23 default
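
The patch below adds `sidedata` and `protocol_flags` attributes to the test-only `simplestorerevisiondelta` class so that it exposes the same fields as the modern `repository.irevisiondelta` interface. As a rough sketch of what an emitted delta object now carries (the variable names and values here are illustrative placeholders, not taken from the patch):

    delta = simplestorerevisiondelta(
        node=node,
        p1node=p1,
        p2node=p2,
        basenode=deltabase,
        flags=0,
        baserevisionsize=None,
        revision=fulltext,      # full revision text, or None when only a delta is sent
        delta=None,             # binary delta against basenode, or None
        sidedata=b'',           # newly added: raw sidedata for this revision
        protocol_flags=0,       # newly added: integer protocol flags
        linknode=linknode,
    )
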
@@ -1,746 +1,748
1 # simplestorerepo.py - Extension that swaps in alternate repository storage.
1 # simplestorerepo.py - Extension that swaps in alternate repository storage.
2 #
2 #
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 # To use this with the test suite:
8 # To use this with the test suite:
9 #
9 #
10 # $ HGREPOFEATURES="simplestore" ./run-tests.py \
10 # $ HGREPOFEATURES="simplestore" ./run-tests.py \
11 # --extra-config-opt extensions.simplestore=`pwd`/simplestorerepo.py
11 # --extra-config-opt extensions.simplestore=`pwd`/simplestorerepo.py
12
12
13
13
14 import stat
14 import stat
15
15
16 from typing import (
16 from typing import (
17 Optional,
17 Optional,
18 )
18 )
19
19
20 from mercurial.i18n import _
20 from mercurial.i18n import _
21 from mercurial.node import (
21 from mercurial.node import (
22 bin,
22 bin,
23 hex,
23 hex,
24 nullrev,
24 nullrev,
25 )
25 )
26 from mercurial.thirdparty import attr
26 from mercurial.thirdparty import attr
27 from mercurial import (
27 from mercurial import (
28 ancestor,
28 ancestor,
29 bundlerepo,
29 bundlerepo,
30 error,
30 error,
31 extensions,
31 extensions,
32 localrepo,
32 localrepo,
33 mdiff,
33 mdiff,
34 pycompat,
34 pycompat,
35 revlog,
35 revlog,
36 store,
36 store,
37 verify,
37 verify,
38 )
38 )
39 from mercurial.interfaces import (
39 from mercurial.interfaces import (
40 repository,
40 repository,
41 util as interfaceutil,
41 util as interfaceutil,
42 )
42 )
43 from mercurial.utils import (
43 from mercurial.utils import (
44 cborutil,
44 cborutil,
45 storageutil,
45 storageutil,
46 )
46 )
47 from mercurial.revlogutils import flagutil
47 from mercurial.revlogutils import flagutil
48
48
49 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
49 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
50 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
50 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
51 # be specifying the version(s) of Mercurial they are tested with, or
51 # be specifying the version(s) of Mercurial they are tested with, or
52 # leave the attribute unspecified.
52 # leave the attribute unspecified.
53 testedwith = b'ships-with-hg-core'
53 testedwith = b'ships-with-hg-core'
54
54
55 REQUIREMENT = b'testonly-simplestore'
55 REQUIREMENT = b'testonly-simplestore'
56
56
57
57
58 def validatenode(node):
58 def validatenode(node):
59 if isinstance(node, int):
59 if isinstance(node, int):
60 raise ValueError('expected node; got int')
60 raise ValueError('expected node; got int')
61
61
62 if len(node) != 20:
62 if len(node) != 20:
63 raise ValueError('expected 20 byte node')
63 raise ValueError('expected 20 byte node')
64
64
65
65
66 def validaterev(rev):
66 def validaterev(rev):
67 if not isinstance(rev, int):
67 if not isinstance(rev, int):
68 raise ValueError('expected int')
68 raise ValueError('expected int')
69
69
70
70
71 class simplestoreerror(error.StorageError):
71 class simplestoreerror(error.StorageError):
72 pass
72 pass
73
73
74
74
75 @attr.s(slots=True)
75 @attr.s(slots=True)
76 class simplestorerevisiondelta(repository.irevisiondelta):
76 class simplestorerevisiondelta(repository.irevisiondelta):
77 node = attr.ib(type=bytes)
77 node = attr.ib(type=bytes)
78 p1node = attr.ib(type=bytes)
78 p1node = attr.ib(type=bytes)
79 p2node = attr.ib(type=bytes)
79 p2node = attr.ib(type=bytes)
80 basenode = attr.ib(type=bytes)
80 basenode = attr.ib(type=bytes)
81 flags = attr.ib(type=int)
81 flags = attr.ib(type=int)
82 baserevisionsize = attr.ib(type=Optional[int])
82 baserevisionsize = attr.ib(type=Optional[int])
83 revision = attr.ib(type=Optional[bytes])
83 revision = attr.ib(type=Optional[bytes])
84 delta = attr.ib(type=Optional[bytes])
84 delta = attr.ib(type=Optional[bytes])
85 sidedata = attr.ib(type=Optional[bytes])
86 protocol_flags = attr.ib(type=int)
85 linknode = attr.ib(default=None, type=Optional[bytes])
87 linknode = attr.ib(default=None, type=Optional[bytes])
86
88
87
89
88 @attr.s(frozen=True)
90 @attr.s(frozen=True)
89 class simplefilestoreproblem(repository.iverifyproblem):
91 class simplefilestoreproblem(repository.iverifyproblem):
90 warning = attr.ib(default=None, type=Optional[bytes])
92 warning = attr.ib(default=None, type=Optional[bytes])
91 error = attr.ib(default=None, type=Optional[bytes])
93 error = attr.ib(default=None, type=Optional[bytes])
92 node = attr.ib(default=None, type=Optional[bytes])
94 node = attr.ib(default=None, type=Optional[bytes])
93
95
94
96
95 @interfaceutil.implementer(repository.ifilestorage)
97 @interfaceutil.implementer(repository.ifilestorage)
96 class filestorage:
98 class filestorage:
97 """Implements storage for a tracked path.
99 """Implements storage for a tracked path.
98
100
99 Data is stored in the VFS in a directory corresponding to the tracked
101 Data is stored in the VFS in a directory corresponding to the tracked
100 path.
102 path.
101
103
102 Index data is stored in an ``index`` file using CBOR.
104 Index data is stored in an ``index`` file using CBOR.
103
105
104 Fulltext data is stored in files having names of the node.
106 Fulltext data is stored in files having names of the node.
105 """
107 """
106
108
107 _flagserrorclass = simplestoreerror
109 _flagserrorclass = simplestoreerror
108
110
109 def __init__(self, repo, svfs, path):
111 def __init__(self, repo, svfs, path):
110 self.nullid = repo.nullid
112 self.nullid = repo.nullid
111 self._repo = repo
113 self._repo = repo
112 self._svfs = svfs
114 self._svfs = svfs
113 self._path = path
115 self._path = path
114
116
115 self._storepath = b'/'.join([b'data', path])
117 self._storepath = b'/'.join([b'data', path])
116 self._indexpath = b'/'.join([self._storepath, b'index'])
118 self._indexpath = b'/'.join([self._storepath, b'index'])
117
119
118 indexdata = self._svfs.tryread(self._indexpath)
120 indexdata = self._svfs.tryread(self._indexpath)
119 if indexdata:
121 if indexdata:
120 indexdata = cborutil.decodeall(indexdata)
122 indexdata = cborutil.decodeall(indexdata)
121
123
122 self._indexdata = indexdata or []
124 self._indexdata = indexdata or []
123 self._indexbynode = {}
125 self._indexbynode = {}
124 self._indexbyrev = {}
126 self._indexbyrev = {}
125 self._index = []
127 self._index = []
126 self._refreshindex()
128 self._refreshindex()
127
129
128 self._flagprocessors = dict(flagutil.flagprocessors)
130 self._flagprocessors = dict(flagutil.flagprocessors)
129
131
130 def _refreshindex(self):
132 def _refreshindex(self):
131 self._indexbynode.clear()
133 self._indexbynode.clear()
132 self._indexbyrev.clear()
134 self._indexbyrev.clear()
133 self._index = []
135 self._index = []
134
136
135 for i, entry in enumerate(self._indexdata):
137 for i, entry in enumerate(self._indexdata):
136 self._indexbynode[entry[b'node']] = entry
138 self._indexbynode[entry[b'node']] = entry
137 self._indexbyrev[i] = entry
139 self._indexbyrev[i] = entry
138
140
139 self._indexbynode[self._repo.nullid] = {
141 self._indexbynode[self._repo.nullid] = {
140 b'node': self._repo.nullid,
142 b'node': self._repo.nullid,
141 b'p1': self._repo.nullid,
143 b'p1': self._repo.nullid,
142 b'p2': self._repo.nullid,
144 b'p2': self._repo.nullid,
143 b'linkrev': nullrev,
145 b'linkrev': nullrev,
144 b'flags': 0,
146 b'flags': 0,
145 }
147 }
146
148
147 self._indexbyrev[nullrev] = {
149 self._indexbyrev[nullrev] = {
148 b'node': self._repo.nullid,
150 b'node': self._repo.nullid,
149 b'p1': self._repo.nullid,
151 b'p1': self._repo.nullid,
150 b'p2': self._repo.nullid,
152 b'p2': self._repo.nullid,
151 b'linkrev': nullrev,
153 b'linkrev': nullrev,
152 b'flags': 0,
154 b'flags': 0,
153 }
155 }
154
156
155 for i, entry in enumerate(self._indexdata):
157 for i, entry in enumerate(self._indexdata):
156 p1rev, p2rev = self.parentrevs(self.rev(entry[b'node']))
158 p1rev, p2rev = self.parentrevs(self.rev(entry[b'node']))
157
159
158 # start, length, rawsize, chainbase, linkrev, p1, p2, node
160 # start, length, rawsize, chainbase, linkrev, p1, p2, node
159 self._index.append(
161 self._index.append(
160 (0, 0, 0, -1, entry[b'linkrev'], p1rev, p2rev, entry[b'node'])
162 (0, 0, 0, -1, entry[b'linkrev'], p1rev, p2rev, entry[b'node'])
161 )
163 )
162
164
163 self._index.append((0, 0, 0, -1, -1, -1, -1, self._repo.nullid))
165 self._index.append((0, 0, 0, -1, -1, -1, -1, self._repo.nullid))
164
166
165 def __len__(self):
167 def __len__(self):
166 return len(self._indexdata)
168 return len(self._indexdata)
167
169
168 def __iter__(self):
170 def __iter__(self):
169 return iter(range(len(self)))
171 return iter(range(len(self)))
170
172
171 def revs(self, start=0, stop=None):
173 def revs(self, start=0, stop=None):
172 step = 1
174 step = 1
173 if stop is not None:
175 if stop is not None:
174 if start > stop:
176 if start > stop:
175 step = -1
177 step = -1
176
178
177 stop += step
179 stop += step
178 else:
180 else:
179 stop = len(self)
181 stop = len(self)
180
182
181 return range(start, stop, step)
183 return range(start, stop, step)
182
184
183 def parents(self, node):
185 def parents(self, node):
184 validatenode(node)
186 validatenode(node)
185
187
186 if node not in self._indexbynode:
188 if node not in self._indexbynode:
187 raise KeyError('unknown node')
189 raise KeyError('unknown node')
188
190
189 entry = self._indexbynode[node]
191 entry = self._indexbynode[node]
190
192
191 return entry[b'p1'], entry[b'p2']
193 return entry[b'p1'], entry[b'p2']
192
194
193 def parentrevs(self, rev):
195 def parentrevs(self, rev):
194 p1, p2 = self.parents(self._indexbyrev[rev][b'node'])
196 p1, p2 = self.parents(self._indexbyrev[rev][b'node'])
195 return self.rev(p1), self.rev(p2)
197 return self.rev(p1), self.rev(p2)
196
198
197 def rev(self, node):
199 def rev(self, node):
198 validatenode(node)
200 validatenode(node)
199
201
200 try:
202 try:
201 self._indexbynode[node]
203 self._indexbynode[node]
202 except KeyError:
204 except KeyError:
203 raise error.LookupError(node, self._indexpath, _('no node'))
205 raise error.LookupError(node, self._indexpath, _('no node'))
204
206
205 for rev, entry in self._indexbyrev.items():
207 for rev, entry in self._indexbyrev.items():
206 if entry[b'node'] == node:
208 if entry[b'node'] == node:
207 return rev
209 return rev
208
210
209 raise error.ProgrammingError(b'this should not occur')
211 raise error.ProgrammingError(b'this should not occur')
210
212
211 def node(self, rev):
213 def node(self, rev):
212 validaterev(rev)
214 validaterev(rev)
213
215
214 return self._indexbyrev[rev][b'node']
216 return self._indexbyrev[rev][b'node']
215
217
216 def hasnode(self, node):
218 def hasnode(self, node):
217 validatenode(node)
219 validatenode(node)
218 return node in self._indexbynode
220 return node in self._indexbynode
219
221
220 def censorrevision(self, tr, censornode, tombstone=b''):
222 def censorrevision(self, tr, censornode, tombstone=b''):
221 raise NotImplementedError('TODO')
223 raise NotImplementedError('TODO')
222
224
223 def lookup(self, node):
225 def lookup(self, node):
224 if isinstance(node, int):
226 if isinstance(node, int):
225 return self.node(node)
227 return self.node(node)
226
228
227 if len(node) == 20:
229 if len(node) == 20:
228 self.rev(node)
230 self.rev(node)
229 return node
231 return node
230
232
231 try:
233 try:
232 rev = int(node)
234 rev = int(node)
233 if '%d' % rev != node:
235 if '%d' % rev != node:
234 raise ValueError
236 raise ValueError
235
237
236 if rev < 0:
238 if rev < 0:
237 rev = len(self) + rev
239 rev = len(self) + rev
238 if rev < 0 or rev >= len(self):
240 if rev < 0 or rev >= len(self):
239 raise ValueError
241 raise ValueError
240
242
241 return self.node(rev)
243 return self.node(rev)
242 except (ValueError, OverflowError):
244 except (ValueError, OverflowError):
243 pass
245 pass
244
246
245 if len(node) == 40:
247 if len(node) == 40:
246 try:
248 try:
247 rawnode = bin(node)
249 rawnode = bin(node)
248 self.rev(rawnode)
250 self.rev(rawnode)
249 return rawnode
251 return rawnode
250 except TypeError:
252 except TypeError:
251 pass
253 pass
252
254
253 raise error.LookupError(node, self._path, _('invalid lookup input'))
255 raise error.LookupError(node, self._path, _('invalid lookup input'))
254
256
255 def linkrev(self, rev):
257 def linkrev(self, rev):
256 validaterev(rev)
258 validaterev(rev)
257
259
258 return self._indexbyrev[rev][b'linkrev']
260 return self._indexbyrev[rev][b'linkrev']
259
261
260 def _flags(self, rev):
262 def _flags(self, rev):
261 validaterev(rev)
263 validaterev(rev)
262
264
263 return self._indexbyrev[rev][b'flags']
265 return self._indexbyrev[rev][b'flags']
264
266
265 def _candelta(self, baserev, rev):
267 def _candelta(self, baserev, rev):
266 validaterev(baserev)
268 validaterev(baserev)
267 validaterev(rev)
269 validaterev(rev)
268
270
269 if (self._flags(baserev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS) or (
271 if (self._flags(baserev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS) or (
270 self._flags(rev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS
272 self._flags(rev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS
271 ):
273 ):
272 return False
274 return False
273
275
274 return True
276 return True
275
277
276 def checkhash(self, text, node, p1=None, p2=None, rev=None):
278 def checkhash(self, text, node, p1=None, p2=None, rev=None):
277 if p1 is None and p2 is None:
279 if p1 is None and p2 is None:
278 p1, p2 = self.parents(node)
280 p1, p2 = self.parents(node)
279 if node != storageutil.hashrevisionsha1(text, p1, p2):
281 if node != storageutil.hashrevisionsha1(text, p1, p2):
280 raise simplestoreerror(
282 raise simplestoreerror(
281 _("integrity check failed on %s") % self._path
283 _("integrity check failed on %s") % self._path
282 )
284 )
283
285
284 def revision(self, nodeorrev, raw=False):
286 def revision(self, nodeorrev, raw=False):
285 if isinstance(nodeorrev, int):
287 if isinstance(nodeorrev, int):
286 node = self.node(nodeorrev)
288 node = self.node(nodeorrev)
287 else:
289 else:
288 node = nodeorrev
290 node = nodeorrev
289 validatenode(node)
291 validatenode(node)
290
292
291 if node == self._repo.nullid:
293 if node == self._repo.nullid:
292 return b''
294 return b''
293
295
294 rev = self.rev(node)
296 rev = self.rev(node)
295 flags = self._flags(rev)
297 flags = self._flags(rev)
296
298
297 path = b'/'.join([self._storepath, hex(node)])
299 path = b'/'.join([self._storepath, hex(node)])
298 rawtext = self._svfs.read(path)
300 rawtext = self._svfs.read(path)
299
301
300 if raw:
302 if raw:
301 validatehash = flagutil.processflagsraw(self, rawtext, flags)
303 validatehash = flagutil.processflagsraw(self, rawtext, flags)
302 text = rawtext
304 text = rawtext
303 else:
305 else:
304 r = flagutil.processflagsread(self, rawtext, flags)
306 r = flagutil.processflagsread(self, rawtext, flags)
305 text, validatehash = r
307 text, validatehash = r
306 if validatehash:
308 if validatehash:
307 self.checkhash(text, node, rev=rev)
309 self.checkhash(text, node, rev=rev)
308
310
309 return text
311 return text
310
312
311 def rawdata(self, nodeorrev):
313 def rawdata(self, nodeorrev):
312 return self.revision(nodeorrev, raw=True)
314 return self.revision(nodeorrev, raw=True)
313
315
314 def read(self, node):
316 def read(self, node):
315 validatenode(node)
317 validatenode(node)
316
318
317 revision = self.revision(node)
319 revision = self.revision(node)
318
320
319 if not revision.startswith(b'\1\n'):
321 if not revision.startswith(b'\1\n'):
320 return revision
322 return revision
321
323
322 start = revision.index(b'\1\n', 2)
324 start = revision.index(b'\1\n', 2)
323 return revision[start + 2 :]
325 return revision[start + 2 :]
324
326
325 def renamed(self, node):
327 def renamed(self, node):
326 validatenode(node)
328 validatenode(node)
327
329
328 if self.parents(node)[0] != self._repo.nullid:
330 if self.parents(node)[0] != self._repo.nullid:
329 return False
331 return False
330
332
331 fulltext = self.revision(node)
333 fulltext = self.revision(node)
332 m = storageutil.parsemeta(fulltext)[0]
334 m = storageutil.parsemeta(fulltext)[0]
333
335
334 if m and 'copy' in m:
336 if m and 'copy' in m:
335 return m['copy'], bin(m['copyrev'])
337 return m['copy'], bin(m['copyrev'])
336
338
337 return False
339 return False
338
340
339 def cmp(self, node, text):
341 def cmp(self, node, text):
340 validatenode(node)
342 validatenode(node)
341
343
342 t = text
344 t = text
343
345
344 if text.startswith(b'\1\n'):
346 if text.startswith(b'\1\n'):
345 t = b'\1\n\1\n' + text
347 t = b'\1\n\1\n' + text
346
348
347 p1, p2 = self.parents(node)
349 p1, p2 = self.parents(node)
348
350
349 if storageutil.hashrevisionsha1(t, p1, p2) == node:
351 if storageutil.hashrevisionsha1(t, p1, p2) == node:
350 return False
352 return False
351
353
352 if self.iscensored(self.rev(node)):
354 if self.iscensored(self.rev(node)):
353 return text != b''
355 return text != b''
354
356
355 if self.renamed(node):
357 if self.renamed(node):
356 t2 = self.read(node)
358 t2 = self.read(node)
357 return t2 != text
359 return t2 != text
358
360
359 return True
361 return True
360
362
361 def size(self, rev):
363 def size(self, rev):
362 validaterev(rev)
364 validaterev(rev)
363
365
364 node = self._indexbyrev[rev][b'node']
366 node = self._indexbyrev[rev][b'node']
365
367
366 if self.renamed(node):
368 if self.renamed(node):
367 return len(self.read(node))
369 return len(self.read(node))
368
370
369 if self.iscensored(rev):
371 if self.iscensored(rev):
370 return 0
372 return 0
371
373
372 return len(self.revision(node))
374 return len(self.revision(node))
373
375
374 def iscensored(self, rev):
376 def iscensored(self, rev):
375 validaterev(rev)
377 validaterev(rev)
376
378
377 return self._flags(rev) & repository.REVISION_FLAG_CENSORED
379 return self._flags(rev) & repository.REVISION_FLAG_CENSORED
378
380
379 def commonancestorsheads(self, a, b):
381 def commonancestorsheads(self, a, b):
380 validatenode(a)
382 validatenode(a)
381 validatenode(b)
383 validatenode(b)
382
384
383 a = self.rev(a)
385 a = self.rev(a)
384 b = self.rev(b)
386 b = self.rev(b)
385
387
386 ancestors = ancestor.commonancestorsheads(self.parentrevs, a, b)
388 ancestors = ancestor.commonancestorsheads(self.parentrevs, a, b)
387 return pycompat.maplist(self.node, ancestors)
389 return pycompat.maplist(self.node, ancestors)
388
390
389 def descendants(self, revs):
391 def descendants(self, revs):
390 # This is a copy of revlog.descendants()
392 # This is a copy of revlog.descendants()
391 first = min(revs)
393 first = min(revs)
392 if first == nullrev:
394 if first == nullrev:
393 for i in self:
395 for i in self:
394 yield i
396 yield i
395 return
397 return
396
398
397 seen = set(revs)
399 seen = set(revs)
398 for i in self.revs(start=first + 1):
400 for i in self.revs(start=first + 1):
399 for x in self.parentrevs(i):
401 for x in self.parentrevs(i):
400 if x != nullrev and x in seen:
402 if x != nullrev and x in seen:
401 seen.add(i)
403 seen.add(i)
402 yield i
404 yield i
403 break
405 break
404
406
405 # Required by verify.
407 # Required by verify.
406 def files(self):
408 def files(self):
407 entries = self._svfs.listdir(self._storepath)
409 entries = self._svfs.listdir(self._storepath)
408
410
409 # Strip out undo.backup.* files created as part of transaction
411 # Strip out undo.backup.* files created as part of transaction
410 # recording.
412 # recording.
411 entries = [f for f in entries if not f.startswith('undo.backup.')]
413 entries = [f for f in entries if not f.startswith('undo.backup.')]
412
414
413 return [b'/'.join((self._storepath, f)) for f in entries]
415 return [b'/'.join((self._storepath, f)) for f in entries]
414
416
415 def storageinfo(
417 def storageinfo(
416 self,
418 self,
417 exclusivefiles=False,
419 exclusivefiles=False,
418 sharedfiles=False,
420 sharedfiles=False,
419 revisionscount=False,
421 revisionscount=False,
420 trackedsize=False,
422 trackedsize=False,
421 storedsize=False,
423 storedsize=False,
422 ):
424 ):
423 # TODO do a real implementation of this
425 # TODO do a real implementation of this
424 return {
426 return {
425 'exclusivefiles': [],
427 'exclusivefiles': [],
426 'sharedfiles': [],
428 'sharedfiles': [],
427 'revisionscount': len(self),
429 'revisionscount': len(self),
428 'trackedsize': 0,
430 'trackedsize': 0,
429 'storedsize': None,
431 'storedsize': None,
430 }
432 }
431
433
432 def verifyintegrity(self, state):
434 def verifyintegrity(self, state):
433 state['skipread'] = set()
435 state['skipread'] = set()
434 for rev in self:
436 for rev in self:
435 node = self.node(rev)
437 node = self.node(rev)
436 try:
438 try:
437 self.revision(node)
439 self.revision(node)
438 except Exception as e:
440 except Exception as e:
439 yield simplefilestoreproblem(
441 yield simplefilestoreproblem(
440 error='unpacking %s: %s' % (node, e), node=node
442 error='unpacking %s: %s' % (node, e), node=node
441 )
443 )
442 state['skipread'].add(node)
444 state['skipread'].add(node)
443
445
444 def emitrevisions(
446 def emitrevisions(
445 self,
447 self,
446 nodes,
448 nodes,
447 nodesorder=None,
449 nodesorder=None,
448 revisiondata=False,
450 revisiondata=False,
449 assumehaveparentrevisions=False,
451 assumehaveparentrevisions=False,
450 deltamode=repository.CG_DELTAMODE_STD,
452 deltamode=repository.CG_DELTAMODE_STD,
451 sidedata_helpers=None,
453 sidedata_helpers=None,
452 ):
454 ):
453 # TODO this will probably break on some ordering options.
455 # TODO this will probably break on some ordering options.
454 nodes = [n for n in nodes if n != self._repo.nullid]
456 nodes = [n for n in nodes if n != self._repo.nullid]
455 if not nodes:
457 if not nodes:
456 return
458 return
457 for delta in storageutil.emitrevisions(
459 for delta in storageutil.emitrevisions(
458 self,
460 self,
459 nodes,
461 nodes,
460 nodesorder,
462 nodesorder,
461 simplestorerevisiondelta,
463 simplestorerevisiondelta,
462 revisiondata=revisiondata,
464 revisiondata=revisiondata,
463 assumehaveparentrevisions=assumehaveparentrevisions,
465 assumehaveparentrevisions=assumehaveparentrevisions,
464 deltamode=deltamode,
466 deltamode=deltamode,
465 sidedata_helpers=sidedata_helpers,
467 sidedata_helpers=sidedata_helpers,
466 ):
468 ):
467 yield delta
469 yield delta
468
470
469 def add(self, text, meta, transaction, linkrev, p1, p2):
471 def add(self, text, meta, transaction, linkrev, p1, p2):
470 if meta or text.startswith(b'\1\n'):
472 if meta or text.startswith(b'\1\n'):
471 text = storageutil.packmeta(meta, text)
473 text = storageutil.packmeta(meta, text)
472
474
473 return self.addrevision(text, transaction, linkrev, p1, p2)
475 return self.addrevision(text, transaction, linkrev, p1, p2)
474
476
475 def addrevision(
477 def addrevision(
476 self,
478 self,
477 text,
479 text,
478 transaction,
480 transaction,
479 linkrev,
481 linkrev,
480 p1,
482 p1,
481 p2,
483 p2,
482 node=None,
484 node=None,
483 flags=revlog.REVIDX_DEFAULT_FLAGS,
485 flags=revlog.REVIDX_DEFAULT_FLAGS,
484 cachedelta=None,
486 cachedelta=None,
485 ):
487 ):
486 validatenode(p1)
488 validatenode(p1)
487 validatenode(p2)
489 validatenode(p2)
488
490
489 if flags:
491 if flags:
490 node = node or storageutil.hashrevisionsha1(text, p1, p2)
492 node = node or storageutil.hashrevisionsha1(text, p1, p2)
491
493
492 rawtext, validatehash = flagutil.processflagswrite(self, text, flags)
494 rawtext, validatehash = flagutil.processflagswrite(self, text, flags)
493
495
494 node = node or storageutil.hashrevisionsha1(text, p1, p2)
496 node = node or storageutil.hashrevisionsha1(text, p1, p2)
495
497
496 if node in self._indexbynode:
498 if node in self._indexbynode:
497 return node
499 return node
498
500
499 if validatehash:
501 if validatehash:
500 self.checkhash(rawtext, node, p1=p1, p2=p2)
502 self.checkhash(rawtext, node, p1=p1, p2=p2)
501
503
502 return self._addrawrevision(
504 return self._addrawrevision(
503 node, rawtext, transaction, linkrev, p1, p2, flags
505 node, rawtext, transaction, linkrev, p1, p2, flags
504 )
506 )
505
507
506 def _addrawrevision(self, node, rawtext, transaction, link, p1, p2, flags):
508 def _addrawrevision(self, node, rawtext, transaction, link, p1, p2, flags):
507 transaction.addbackup(self._indexpath)
509 transaction.addbackup(self._indexpath)
508
510
509 path = b'/'.join([self._storepath, hex(node)])
511 path = b'/'.join([self._storepath, hex(node)])
510
512
511 self._svfs.write(path, rawtext)
513 self._svfs.write(path, rawtext)
512
514
513 self._indexdata.append(
515 self._indexdata.append(
514 {
516 {
515 b'node': node,
517 b'node': node,
516 b'p1': p1,
518 b'p1': p1,
517 b'p2': p2,
519 b'p2': p2,
518 b'linkrev': link,
520 b'linkrev': link,
519 b'flags': flags,
521 b'flags': flags,
520 }
522 }
521 )
523 )
522
524
523 self._reflectindexupdate()
525 self._reflectindexupdate()
524
526
525 return node
527 return node
526
528
527 def _reflectindexupdate(self):
529 def _reflectindexupdate(self):
528 self._refreshindex()
530 self._refreshindex()
529 self._svfs.write(
531 self._svfs.write(
530 self._indexpath, ''.join(cborutil.streamencode(self._indexdata))
532 self._indexpath, ''.join(cborutil.streamencode(self._indexdata))
531 )
533 )
532
534
533 def addgroup(
535 def addgroup(
534 self,
536 self,
535 deltas,
537 deltas,
536 linkmapper,
538 linkmapper,
537 transaction,
539 transaction,
538 addrevisioncb=None,
540 addrevisioncb=None,
539 duplicaterevisioncb=None,
541 duplicaterevisioncb=None,
540 maybemissingparents=False,
542 maybemissingparents=False,
541 ):
543 ):
542 if maybemissingparents:
544 if maybemissingparents:
543 raise error.Abort(
545 raise error.Abort(
544 _('simple store does not support missing parents ' 'write mode')
546 _('simple store does not support missing parents ' 'write mode')
545 )
547 )
546
548
547 empty = True
549 empty = True
548
550
549 transaction.addbackup(self._indexpath)
551 transaction.addbackup(self._indexpath)
550
552
551 for node, p1, p2, linknode, deltabase, delta, flags in deltas:
553 for node, p1, p2, linknode, deltabase, delta, flags in deltas:
552 linkrev = linkmapper(linknode)
554 linkrev = linkmapper(linknode)
553 flags = flags or revlog.REVIDX_DEFAULT_FLAGS
555 flags = flags or revlog.REVIDX_DEFAULT_FLAGS
554
556
555 if node in self._indexbynode:
557 if node in self._indexbynode:
556 if duplicaterevisioncb:
558 if duplicaterevisioncb:
557 duplicaterevisioncb(self, self.rev(node))
559 duplicaterevisioncb(self, self.rev(node))
558 empty = False
560 empty = False
559 continue
561 continue
560
562
561 # Need to resolve the fulltext from the delta base.
563 # Need to resolve the fulltext from the delta base.
562 if deltabase == self._repo.nullid:
564 if deltabase == self._repo.nullid:
563 text = mdiff.patch(b'', delta)
565 text = mdiff.patch(b'', delta)
564 else:
566 else:
565 text = mdiff.patch(self.revision(deltabase), delta)
567 text = mdiff.patch(self.revision(deltabase), delta)
566
568
567 rev = self._addrawrevision(
569 rev = self._addrawrevision(
568 node, text, transaction, linkrev, p1, p2, flags
570 node, text, transaction, linkrev, p1, p2, flags
569 )
571 )
570
572
571 if addrevisioncb:
573 if addrevisioncb:
572 addrevisioncb(self, rev)
574 addrevisioncb(self, rev)
573 empty = False
575 empty = False
574 return not empty
576 return not empty
575
577
576 def _headrevs(self):
578 def _headrevs(self):
577 # Assume all revisions are heads by default.
579 # Assume all revisions are heads by default.
578 revishead = {rev: True for rev in self._indexbyrev}
580 revishead = {rev: True for rev in self._indexbyrev}
579
581
580 for rev, entry in self._indexbyrev.items():
582 for rev, entry in self._indexbyrev.items():
581 # Unset head flag for all seen parents.
583 # Unset head flag for all seen parents.
582 revishead[self.rev(entry[b'p1'])] = False
584 revishead[self.rev(entry[b'p1'])] = False
583 revishead[self.rev(entry[b'p2'])] = False
585 revishead[self.rev(entry[b'p2'])] = False
584
586
585 return [rev for rev, ishead in sorted(revishead.items()) if ishead]
587 return [rev for rev, ishead in sorted(revishead.items()) if ishead]
586
588
587 def heads(self, start=None, stop=None):
589 def heads(self, start=None, stop=None):
588 # This is copied from revlog.py.
590 # This is copied from revlog.py.
589 if start is None and stop is None:
591 if start is None and stop is None:
590 if not len(self):
592 if not len(self):
591 return [self._repo.nullid]
593 return [self._repo.nullid]
592 return [self.node(r) for r in self._headrevs()]
594 return [self.node(r) for r in self._headrevs()]
593
595
594 if start is None:
596 if start is None:
595 start = self._repo.nullid
597 start = self._repo.nullid
596 if stop is None:
598 if stop is None:
597 stop = []
599 stop = []
598 stoprevs = {self.rev(n) for n in stop}
600 stoprevs = {self.rev(n) for n in stop}
599 startrev = self.rev(start)
601 startrev = self.rev(start)
600 reachable = {startrev}
602 reachable = {startrev}
601 heads = {startrev}
603 heads = {startrev}
602
604
603 parentrevs = self.parentrevs
605 parentrevs = self.parentrevs
604 for r in self.revs(start=startrev + 1):
606 for r in self.revs(start=startrev + 1):
605 for p in parentrevs(r):
607 for p in parentrevs(r):
606 if p in reachable:
608 if p in reachable:
607 if r not in stoprevs:
609 if r not in stoprevs:
608 reachable.add(r)
610 reachable.add(r)
609 heads.add(r)
611 heads.add(r)
610 if p in heads and p not in stoprevs:
612 if p in heads and p not in stoprevs:
611 heads.remove(p)
613 heads.remove(p)
612
614
613 return [self.node(r) for r in heads]
615 return [self.node(r) for r in heads]
614
616
615 def children(self, node):
617 def children(self, node):
616 validatenode(node)
618 validatenode(node)
617
619
618 # This is a copy of revlog.children().
620 # This is a copy of revlog.children().
619 c = []
621 c = []
620 p = self.rev(node)
622 p = self.rev(node)
621 for r in self.revs(start=p + 1):
623 for r in self.revs(start=p + 1):
622 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
624 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
623 if prevs:
625 if prevs:
624 for pr in prevs:
626 for pr in prevs:
625 if pr == p:
627 if pr == p:
626 c.append(self.node(r))
628 c.append(self.node(r))
627 elif p == nullrev:
629 elif p == nullrev:
628 c.append(self.node(r))
630 c.append(self.node(r))
629 return c
631 return c
630
632
631 def getstrippoint(self, minlink):
633 def getstrippoint(self, minlink):
632 return storageutil.resolvestripinfo(
634 return storageutil.resolvestripinfo(
633 minlink,
635 minlink,
634 len(self) - 1,
636 len(self) - 1,
635 self._headrevs(),
637 self._headrevs(),
636 self.linkrev,
638 self.linkrev,
637 self.parentrevs,
639 self.parentrevs,
638 )
640 )
639
641
640 def strip(self, minlink, transaction):
642 def strip(self, minlink, transaction):
641 if not len(self):
643 if not len(self):
642 return
644 return
643
645
644 rev, _ignored = self.getstrippoint(minlink)
646 rev, _ignored = self.getstrippoint(minlink)
645 if rev == len(self):
647 if rev == len(self):
646 return
648 return
647
649
648 # Purge index data starting at the requested revision.
650 # Purge index data starting at the requested revision.
649 self._indexdata[rev:] = []
651 self._indexdata[rev:] = []
650 self._reflectindexupdate()
652 self._reflectindexupdate()
651
653
652
654
653 def issimplestorefile(f, kind, st):
655 def issimplestorefile(f, kind, st):
654 if kind != stat.S_IFREG:
656 if kind != stat.S_IFREG:
655 return False
657 return False
656
658
657 if store.isrevlog(f, kind, st):
659 if store.isrevlog(f, kind, st):
658 return False
660 return False
659
661
660 # Ignore transaction undo files.
662 # Ignore transaction undo files.
661 if f.startswith('undo.'):
663 if f.startswith('undo.'):
662 return False
664 return False
663
665
664 # Otherwise assume it belongs to the simple store.
666 # Otherwise assume it belongs to the simple store.
665 return True
667 return True
666
668
667
669
668 class simplestore(store.encodedstore):
670 class simplestore(store.encodedstore):
669 def data_entries(self, undecodable=None):
671 def data_entries(self, undecodable=None):
670 for x in super(simplestore, self).data_entries():
672 for x in super(simplestore, self).data_entries():
671 yield x
673 yield x
672
674
673 # Supplement with non-revlog files.
675 # Supplement with non-revlog files.
674 extrafiles = self._walk('data', True, filefilter=issimplestorefile)
676 extrafiles = self._walk('data', True, filefilter=issimplestorefile)
675
677
676 for f1, size in extrafiles:
678 for f1, size in extrafiles:
677 try:
679 try:
678 f2 = store.decodefilename(f1)
680 f2 = store.decodefilename(f1)
679 except KeyError:
681 except KeyError:
680 if undecodable is None:
682 if undecodable is None:
681 raise error.StorageError(b'undecodable revlog name %s' % f1)
683 raise error.StorageError(b'undecodable revlog name %s' % f1)
682 else:
684 else:
683 undecodable.append(f1)
685 undecodable.append(f1)
684 continue
686 continue
685
687
686 yield f2, size
688 yield f2, size
687
689
688
690
689 def reposetup(ui, repo):
691 def reposetup(ui, repo):
690 if not repo.local():
692 if not repo.local():
691 return
693 return
692
694
693 if isinstance(repo, bundlerepo.bundlerepository):
695 if isinstance(repo, bundlerepo.bundlerepository):
694 raise error.Abort(_('cannot use simple store with bundlerepo'))
696 raise error.Abort(_('cannot use simple store with bundlerepo'))
695
697
696 class simplestorerepo(repo.__class__):
698 class simplestorerepo(repo.__class__):
697 def file(self, f):
699 def file(self, f):
698 return filestorage(repo, self.svfs, f)
700 return filestorage(repo, self.svfs, f)
699
701
700 repo.__class__ = simplestorerepo
702 repo.__class__ = simplestorerepo
701
703
702
704
703 def featuresetup(ui, supported):
705 def featuresetup(ui, supported):
704 supported.add(REQUIREMENT)
706 supported.add(REQUIREMENT)
705
707
706
708
707 def newreporequirements(orig, ui, createopts):
709 def newreporequirements(orig, ui, createopts):
708 """Modifies default requirements for new repos to use the simple store."""
710 """Modifies default requirements for new repos to use the simple store."""
709 requirements = orig(ui, createopts)
711 requirements = orig(ui, createopts)
710
712
711 # These requirements are only used to affect creation of the store
713 # These requirements are only used to affect creation of the store
712 # object. We have our own store. So we can remove them.
714 # object. We have our own store. So we can remove them.
713 # TODO do this once we feel like taking the test hit.
715 # TODO do this once we feel like taking the test hit.
714 # if 'fncache' in requirements:
716 # if 'fncache' in requirements:
715 # requirements.remove('fncache')
717 # requirements.remove('fncache')
716 # if 'dotencode' in requirements:
718 # if 'dotencode' in requirements:
717 # requirements.remove('dotencode')
719 # requirements.remove('dotencode')
718
720
719 requirements.add(REQUIREMENT)
721 requirements.add(REQUIREMENT)
720
722
721 return requirements
723 return requirements
722
724
723
725
724 def makestore(orig, requirements, path, vfstype):
726 def makestore(orig, requirements, path, vfstype):
725 if REQUIREMENT not in requirements:
727 if REQUIREMENT not in requirements:
726 return orig(requirements, path, vfstype)
728 return orig(requirements, path, vfstype)
727
729
728 return simplestore(path, vfstype)
730 return simplestore(path, vfstype)
729
731
730
732
731 def verifierinit(orig, self, *args, **kwargs):
733 def verifierinit(orig, self, *args, **kwargs):
732 orig(self, *args, **kwargs)
734 orig(self, *args, **kwargs)
733
735
734 # We don't care that files in the store don't align with what is
736 # We don't care that files in the store don't align with what is
735 # advertised. So suppress these warnings.
737 # advertised. So suppress these warnings.
736 self.warnorphanstorefiles = False
738 self.warnorphanstorefiles = False
737
739
738
740
739 def extsetup(ui):
741 def extsetup(ui):
740 localrepo.featuresetupfuncs.add(featuresetup)
742 localrepo.featuresetupfuncs.add(featuresetup)
741
743
742 extensions.wrapfunction(
744 extensions.wrapfunction(
743 localrepo, 'newreporequirements', newreporequirements
745 localrepo, 'newreporequirements', newreporequirements
744 )
746 )
745 extensions.wrapfunction(localrepo, 'makestore', makestore)
747 extensions.wrapfunction(localrepo, 'makestore', makestore)
746 extensions.wrapfunction(verify.verifier, '__init__', verifierinit)
748 extensions.wrapfunction(verify.verifier, '__init__', verifierinit)
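
For context, `filestorage.emitrevisions()` above passes `simplestorerevisiondelta` to `storageutil.emitrevisions()` as the result class for the delta objects it yields, so any consumer walking this storage sees the full `irevisiondelta` attribute set, including the two newly added fields. A minimal consumer sketch (the `fl` storage object and `nodes` list are assumed to exist; this is not part of the patch):

    for delta in fl.emitrevisions(nodes, revisiondata=True):
        # each yielded object exposes the complete irevisiondelta surface
        print(delta.node, delta.linknode)
        print(delta.sidedata)        # raw sidedata bytes (possibly None or empty)
        print(delta.protocol_flags)  # integer protocol flags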