rawdata: implement `rawdata` for `simplestore` too...
marmoute
r42950:21771337 default
@@ -1,705 +1,708 @@
# simplestorerepo.py - Extension that swaps in alternate repository storage.
#
# Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

# To use this with the test suite:
#
#   $ HGREPOFEATURES="simplestore" ./run-tests.py \
#       --extra-config-opt extensions.simplestore=`pwd`/simplestorerepo.py

from __future__ import absolute_import

import stat

from mercurial.i18n import _
from mercurial.node import (
    bin,
    hex,
    nullid,
    nullrev,
)
from mercurial.thirdparty import (
    attr,
)
from mercurial import (
    ancestor,
    bundlerepo,
    error,
    extensions,
    localrepo,
    mdiff,
    pycompat,
    repository,
    revlog,
    store,
    verify,
)
from mercurial.utils import (
    cborutil,
    interfaceutil,
    storageutil,
)

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

REQUIREMENT = 'testonly-simplestore'

def validatenode(node):
    if isinstance(node, int):
        raise ValueError('expected node; got int')

    if len(node) != 20:
        raise ValueError('expected 20 byte node')

def validaterev(rev):
    if not isinstance(rev, int):
        raise ValueError('expected int')

class simplestoreerror(error.StorageError):
    pass

@interfaceutil.implementer(repository.irevisiondelta)
@attr.s(slots=True)
class simplestorerevisiondelta(object):
    node = attr.ib()
    p1node = attr.ib()
    p2node = attr.ib()
    basenode = attr.ib()
    flags = attr.ib()
    baserevisionsize = attr.ib()
    revision = attr.ib()
    delta = attr.ib()
    linknode = attr.ib(default=None)

@interfaceutil.implementer(repository.iverifyproblem)
@attr.s(frozen=True)
class simplefilestoreproblem(object):
    warning = attr.ib(default=None)
    error = attr.ib(default=None)
    node = attr.ib(default=None)

@interfaceutil.implementer(repository.ifilestorage)
class filestorage(object):
    """Implements storage for a tracked path.

    Data is stored in the VFS in a directory corresponding to the tracked
    path.

    Index data is stored in an ``index`` file using CBOR.

    Fulltext data is stored in files having names of the node.
    """

    def __init__(self, svfs, path):
        self._svfs = svfs
        self._path = path

        self._storepath = b'/'.join([b'data', path])
        self._indexpath = b'/'.join([self._storepath, b'index'])

        indexdata = self._svfs.tryread(self._indexpath)
        if indexdata:
            indexdata = cborutil.decodeall(indexdata)

        self._indexdata = indexdata or []
        self._indexbynode = {}
        self._indexbyrev = {}
        self._index = []
        self._refreshindex()

    def _refreshindex(self):
        self._indexbynode.clear()
        self._indexbyrev.clear()
        self._index = []

        for i, entry in enumerate(self._indexdata):
            self._indexbynode[entry[b'node']] = entry
            self._indexbyrev[i] = entry

        self._indexbynode[nullid] = {
            b'node': nullid,
            b'p1': nullid,
            b'p2': nullid,
            b'linkrev': nullrev,
            b'flags': 0,
        }

        self._indexbyrev[nullrev] = {
            b'node': nullid,
            b'p1': nullid,
            b'p2': nullid,
            b'linkrev': nullrev,
            b'flags': 0,
        }

        for i, entry in enumerate(self._indexdata):
            p1rev, p2rev = self.parentrevs(self.rev(entry[b'node']))

            # start, length, rawsize, chainbase, linkrev, p1, p2, node
            self._index.append((0, 0, 0, -1, entry[b'linkrev'], p1rev, p2rev,
                                entry[b'node']))

        self._index.append((0, 0, 0, -1, -1, -1, -1, nullid))

    def __len__(self):
        return len(self._indexdata)

    def __iter__(self):
        return iter(range(len(self)))

    def revs(self, start=0, stop=None):
        step = 1
        if stop is not None:
            if start > stop:
                step = -1

            stop += step
        else:
            stop = len(self)

        return range(start, stop, step)

    def parents(self, node):
        validatenode(node)

        if node not in self._indexbynode:
            raise KeyError('unknown node')

        entry = self._indexbynode[node]

        return entry[b'p1'], entry[b'p2']

    def parentrevs(self, rev):
        p1, p2 = self.parents(self._indexbyrev[rev][b'node'])
        return self.rev(p1), self.rev(p2)

    def rev(self, node):
        validatenode(node)

        try:
            self._indexbynode[node]
        except KeyError:
            raise error.LookupError(node, self._indexpath, _('no node'))

        for rev, entry in self._indexbyrev.items():
            if entry[b'node'] == node:
                return rev

        raise error.ProgrammingError('this should not occur')

    def node(self, rev):
        validaterev(rev)

        return self._indexbyrev[rev][b'node']

    def hasnode(self, node):
        validatenode(node)
        return node in self._indexbynode

    def censorrevision(self, tr, censornode, tombstone=b''):
        raise NotImplementedError('TODO')

    def lookup(self, node):
        if isinstance(node, int):
            return self.node(node)

        if len(node) == 20:
            self.rev(node)
            return node

        try:
            rev = int(node)
            if '%d' % rev != node:
                raise ValueError

            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError

            return self.node(rev)
        except (ValueError, OverflowError):
            pass

        if len(node) == 40:
            try:
                rawnode = bin(node)
                self.rev(rawnode)
                return rawnode
            except TypeError:
                pass

        raise error.LookupError(node, self._path, _('invalid lookup input'))

    def linkrev(self, rev):
        validaterev(rev)

        return self._indexbyrev[rev][b'linkrev']

    def _flags(self, rev):
        validaterev(rev)

        return self._indexbyrev[rev][b'flags']

    def _candelta(self, baserev, rev):
        validaterev(baserev)
        validaterev(rev)

        if ((self._flags(baserev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)
            or (self._flags(rev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)):
            return False

        return True

    def _processflags(self, text, flags, operation, raw=False):
        if flags == 0:
            return text, True

        if flags & ~revlog.REVIDX_KNOWN_FLAGS:
            raise simplestoreerror(_("incompatible revision flag '%#x'") %
                                   (flags & ~revlog.REVIDX_KNOWN_FLAGS))

        validatehash = True
        # Depending on the operation (read or write), the order might be
        # reversed due to non-commutative transforms.
        orderedflags = revlog.REVIDX_FLAGS_ORDER
        if operation == 'write':
            orderedflags = reversed(orderedflags)

        for flag in orderedflags:
            # If a flagprocessor has been registered for a known flag, apply the
            # related operation transform and update result tuple.
            if flag & flags:
                vhash = True

                if flag not in revlog._flagprocessors:
                    message = _("missing processor for flag '%#x'") % (flag)
                    raise simplestoreerror(message)

                processor = revlog._flagprocessors[flag]
                if processor is not None:
                    readtransform, writetransform, rawtransform = processor

                    if raw:
                        vhash = rawtransform(self, text)
                    elif operation == 'read':
                        text, vhash = readtransform(self, text)
                    else: # write operation
                        text, vhash = writetransform(self, text)
                validatehash = validatehash and vhash

        return text, validatehash

    def checkhash(self, text, node, p1=None, p2=None, rev=None):
        if p1 is None and p2 is None:
            p1, p2 = self.parents(node)
        if node != storageutil.hashrevisionsha1(text, p1, p2):
            raise simplestoreerror(_("integrity check failed on %s") %
                                   self._path)

    def revision(self, nodeorrev, raw=False):
        if isinstance(nodeorrev, int):
            node = self.node(nodeorrev)
        else:
            node = nodeorrev
            validatenode(node)

        if node == nullid:
            return b''

        rev = self.rev(node)
        flags = self._flags(rev)

        path = b'/'.join([self._storepath, hex(node)])
        rawtext = self._svfs.read(path)

        text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
        if validatehash:
            self.checkhash(text, node, rev=rev)

        return text

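    # The method below completes the ifilestorage interface for this store:
    # ``rawdata(x)`` is meant to be equivalent to ``revision(x, raw=True)``,
    # i.e. it returns the stored fulltext without applying read-time flag
    # transforms.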
    def rawdata(self, nodeorrev):
        return self.revision(nodeorrev, raw=True)

    def read(self, node):
        validatenode(node)

        revision = self.revision(node)

        if not revision.startswith(b'\1\n'):
            return revision

        start = revision.index(b'\1\n', 2)
        return revision[start + 2:]

    def renamed(self, node):
        validatenode(node)

        if self.parents(node)[0] != nullid:
            return False

        fulltext = self.revision(node)
        m = storageutil.parsemeta(fulltext)[0]

        if m and 'copy' in m:
            return m['copy'], bin(m['copyrev'])

        return False

    def cmp(self, node, text):
        validatenode(node)

        t = text

        if text.startswith(b'\1\n'):
            t = b'\1\n\1\n' + text

        p1, p2 = self.parents(node)

        if storageutil.hashrevisionsha1(t, p1, p2) == node:
            return False

        if self.iscensored(self.rev(node)):
            return text != b''

        if self.renamed(node):
            t2 = self.read(node)
            return t2 != text

        return True

    def size(self, rev):
        validaterev(rev)

        node = self._indexbyrev[rev][b'node']

        if self.renamed(node):
            return len(self.read(node))

        if self.iscensored(rev):
            return 0

        return len(self.revision(node))

    def iscensored(self, rev):
        validaterev(rev)

        return self._flags(rev) & repository.REVISION_FLAG_CENSORED

    def commonancestorsheads(self, a, b):
        validatenode(a)
        validatenode(b)

        a = self.rev(a)
        b = self.rev(b)

        ancestors = ancestor.commonancestorsheads(self.parentrevs, a, b)
        return pycompat.maplist(self.node, ancestors)

    def descendants(self, revs):
        # This is a copy of revlog.descendants()
        first = min(revs)
        if first == nullrev:
            for i in self:
                yield i
            return

        seen = set(revs)
        for i in self.revs(start=first + 1):
            for x in self.parentrevs(i):
                if x != nullrev and x in seen:
                    seen.add(i)
                    yield i
                    break

    # Required by verify.
    def files(self):
        entries = self._svfs.listdir(self._storepath)

        # Strip out undo.backup.* files created as part of transaction
        # recording.
        entries = [f for f in entries if not f.startswith('undo.backup.')]

        return [b'/'.join((self._storepath, f)) for f in entries]

    def storageinfo(self, exclusivefiles=False, sharedfiles=False,
                    revisionscount=False, trackedsize=False,
                    storedsize=False):
        # TODO do a real implementation of this
        return {
            'exclusivefiles': [],
            'sharedfiles': [],
            'revisionscount': len(self),
            'trackedsize': 0,
            'storedsize': None,
        }

    def verifyintegrity(self, state):
        state['skipread'] = set()
        for rev in self:
            node = self.node(rev)
            try:
                self.revision(node)
            except Exception as e:
                yield simplefilestoreproblem(
                    error='unpacking %s: %s' % (node, e),
                    node=node)
                state['skipread'].add(node)

    def emitrevisions(self, nodes, nodesorder=None, revisiondata=False,
                      assumehaveparentrevisions=False,
                      deltamode=repository.CG_DELTAMODE_STD):
        # TODO this will probably break on some ordering options.
        nodes = [n for n in nodes if n != nullid]
        if not nodes:
            return
        for delta in storageutil.emitrevisions(
                self, nodes, nodesorder, simplestorerevisiondelta,
                revisiondata=revisiondata,
                assumehaveparentrevisions=assumehaveparentrevisions,
                deltamode=deltamode):
            yield delta

    def add(self, text, meta, transaction, linkrev, p1, p2):
        if meta or text.startswith(b'\1\n'):
            text = storageutil.packmeta(meta, text)

        return self.addrevision(text, transaction, linkrev, p1, p2)

    def addrevision(self, text, transaction, linkrev, p1, p2, node=None,
                    flags=revlog.REVIDX_DEFAULT_FLAGS, cachedelta=None):
        validatenode(p1)
        validatenode(p2)

        if flags:
            node = node or storageutil.hashrevisionsha1(text, p1, p2)

        rawtext, validatehash = self._processflags(text, flags, 'write')

        node = node or storageutil.hashrevisionsha1(text, p1, p2)

        if node in self._indexbynode:
            return node

        if validatehash:
            self.checkhash(rawtext, node, p1=p1, p2=p2)

        return self._addrawrevision(node, rawtext, transaction, linkrev, p1, p2,
                                    flags)

    def _addrawrevision(self, node, rawtext, transaction, link, p1, p2, flags):
        transaction.addbackup(self._indexpath)

        path = b'/'.join([self._storepath, hex(node)])

        self._svfs.write(path, rawtext)

        self._indexdata.append({
            b'node': node,
            b'p1': p1,
            b'p2': p2,
            b'linkrev': link,
            b'flags': flags,
        })

        self._reflectindexupdate()

        return node

    def _reflectindexupdate(self):
        self._refreshindex()
        self._svfs.write(self._indexpath,
                         ''.join(cborutil.streamencode(self._indexdata)))

    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None,
                 maybemissingparents=False):
        if maybemissingparents:
            raise error.Abort(_('simple store does not support missing parents '
                                'write mode'))

        nodes = []

        transaction.addbackup(self._indexpath)

        for node, p1, p2, linknode, deltabase, delta, flags in deltas:
            linkrev = linkmapper(linknode)
            flags = flags or revlog.REVIDX_DEFAULT_FLAGS

            nodes.append(node)

            if node in self._indexbynode:
                continue

            # Need to resolve the fulltext from the delta base.
            if deltabase == nullid:
                text = mdiff.patch(b'', delta)
            else:
                text = mdiff.patch(self.revision(deltabase), delta)

            self._addrawrevision(node, text, transaction, linkrev, p1, p2,
                                 flags)

            if addrevisioncb:
                addrevisioncb(self, node)
        return nodes

    def _headrevs(self):
        # Assume all revisions are heads by default.
        revishead = {rev: True for rev in self._indexbyrev}

        for rev, entry in self._indexbyrev.items():
            # Unset head flag for all seen parents.
            revishead[self.rev(entry[b'p1'])] = False
            revishead[self.rev(entry[b'p2'])] = False

        return [rev for rev, ishead in sorted(revishead.items())
                if ishead]

    def heads(self, start=None, stop=None):
        # This is copied from revlog.py.
        if start is None and stop is None:
            if not len(self):
                return [nullid]
            return [self.node(r) for r in self._headrevs()]

        if start is None:
            start = nullid
        if stop is None:
            stop = []
        stoprevs = set([self.rev(n) for n in stop])
        startrev = self.rev(start)
        reachable = {startrev}
        heads = {startrev}

        parentrevs = self.parentrevs
        for r in self.revs(start=startrev + 1):
            for p in parentrevs(r):
                if p in reachable:
                    if r not in stoprevs:
                        reachable.add(r)
                        heads.add(r)
                    if p in heads and p not in stoprevs:
                        heads.remove(p)

        return [self.node(r) for r in heads]

    def children(self, node):
        validatenode(node)

        # This is a copy of revlog.children().
        c = []
        p = self.rev(node)
        for r in self.revs(start=p + 1):
            prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
            if prevs:
                for pr in prevs:
                    if pr == p:
                        c.append(self.node(r))
            elif p == nullrev:
                c.append(self.node(r))
        return c

    def getstrippoint(self, minlink):
        return storageutil.resolvestripinfo(
            minlink, len(self) - 1, self._headrevs(), self.linkrev,
            self.parentrevs)

    def strip(self, minlink, transaction):
        if not len(self):
            return

        rev, _ignored = self.getstrippoint(minlink)
        if rev == len(self):
            return

        # Purge index data starting at the requested revision.
        self._indexdata[rev:] = []
        self._reflectindexupdate()

def issimplestorefile(f, kind, st):
    if kind != stat.S_IFREG:
        return False

    if store.isrevlog(f, kind, st):
        return False

    # Ignore transaction undo files.
    if f.startswith('undo.'):
        return False

    # Otherwise assume it belongs to the simple store.
    return True

class simplestore(store.encodedstore):
    def datafiles(self):
        for x in super(simplestore, self).datafiles():
            yield x

        # Supplement with non-revlog files.
        extrafiles = self._walk('data', True, filefilter=issimplestorefile)

        for unencoded, encoded, size in extrafiles:
            try:
                unencoded = store.decodefilename(unencoded)
            except KeyError:
                unencoded = None

            yield unencoded, encoded, size

def reposetup(ui, repo):
    if not repo.local():
        return

    if isinstance(repo, bundlerepo.bundlerepository):
        raise error.Abort(_('cannot use simple store with bundlerepo'))

    class simplestorerepo(repo.__class__):
        def file(self, f):
            return filestorage(self.svfs, f)

    repo.__class__ = simplestorerepo

def featuresetup(ui, supported):
    supported.add(REQUIREMENT)

def newreporequirements(orig, ui, createopts):
    """Modifies default requirements for new repos to use the simple store."""
    requirements = orig(ui, createopts)

    # These requirements are only used to affect creation of the store
    # object. We have our own store. So we can remove them.
    # TODO do this once we feel like taking the test hit.
    #if 'fncache' in requirements:
    #    requirements.remove('fncache')
    #if 'dotencode' in requirements:
    #    requirements.remove('dotencode')

    requirements.add(REQUIREMENT)

    return requirements

def makestore(orig, requirements, path, vfstype):
    if REQUIREMENT not in requirements:
        return orig(requirements, path, vfstype)

    return simplestore(path, vfstype)

def verifierinit(orig, self, *args, **kwargs):
    orig(self, *args, **kwargs)

    # We don't care that files in the store don't align with what is
    # advertised. So suppress these warnings.
    self.warnorphanstorefiles = False

def extsetup(ui):
    localrepo.featuresetupfuncs.add(featuresetup)

    extensions.wrapfunction(localrepo, 'newreporequirements',
                            newreporequirements)
    extensions.wrapfunction(localrepo, 'makestore', makestore)
    extensions.wrapfunction(verify.verifier, '__init__', verifierinit)
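
For context, the file storage objects returned by this extension's `file()` now expose `rawdata` alongside `revision`. A minimal sketch of exercising it from a test or debug session, assuming a repository opened with the extension enabled and a tracked file `foo` with at least one revision (the names `repo` and `foo` are illustrative and not part of the change):

    fl = repo.file(b'foo')    # simplestore-backed filestorage instance
    node = fl.node(0)         # node of the first stored revision
    # rawdata() returns the stored fulltext without read-time flag
    # transforms; it should match revision() called with raw=True.
    assert fl.rawdata(node) == fl.revision(node, raw=True)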