tests: byteify a few more things in simplestorerepo.py...
Matt Harbison
r44132:8ca92bcb default
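The change itself is three one-line byteifications: the module-level testedwith and REQUIREMENT literals and one ProgrammingError message gain a b'' prefix (marked with -/+ in the hunk below). Presumably this is the usual Python 3 cleanup in Mercurial: requirement names and most internal strings are kept as bytes, so a native str literal would never compare equal to the bytes already stored in a requirements set. A minimal illustration, not part of the change, with made-up set contents:

# On Python 3, str and bytes never compare equal, so a requirement
# recorded as bytes is invisible to a str lookup.
requirements = {b'revlogv1', b'store', b'testonly-simplestore'}
'testonly-simplestore' in requirements    # False on Python 3
b'testonly-simplestore' in requirements   # True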
@@ -1,735 +1,735 @@
# simplestorerepo.py - Extension that swaps in alternate repository storage.
#
# Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

# To use this with the test suite:
#
# $ HGREPOFEATURES="simplestore" ./run-tests.py \
#    --extra-config-opt extensions.simplestore=`pwd`/simplestorerepo.py

from __future__ import absolute_import

import stat

from mercurial.i18n import _
from mercurial.node import (
    bin,
    hex,
    nullid,
    nullrev,
)
from mercurial.thirdparty import attr
from mercurial import (
    ancestor,
    bundlerepo,
    error,
    extensions,
    localrepo,
    mdiff,
    pycompat,
    revlog,
    store,
    verify,
)
from mercurial.interfaces import (
    repository,
    util as interfaceutil,
)
from mercurial.utils import (
    cborutil,
    storageutil,
)
from mercurial.revlogutils import flagutil

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'

-REQUIREMENT = 'testonly-simplestore'
+REQUIREMENT = b'testonly-simplestore'


def validatenode(node):
    if isinstance(node, int):
        raise ValueError('expected node; got int')

    if len(node) != 20:
        raise ValueError('expected 20 byte node')


def validaterev(rev):
    if not isinstance(rev, int):
        raise ValueError('expected int')


class simplestoreerror(error.StorageError):
    pass


@interfaceutil.implementer(repository.irevisiondelta)
@attr.s(slots=True)
class simplestorerevisiondelta(object):
    node = attr.ib()
    p1node = attr.ib()
    p2node = attr.ib()
    basenode = attr.ib()
    flags = attr.ib()
    baserevisionsize = attr.ib()
    revision = attr.ib()
    delta = attr.ib()
    linknode = attr.ib(default=None)


@interfaceutil.implementer(repository.iverifyproblem)
@attr.s(frozen=True)
class simplefilestoreproblem(object):
    warning = attr.ib(default=None)
    error = attr.ib(default=None)
    node = attr.ib(default=None)


@interfaceutil.implementer(repository.ifilestorage)
class filestorage(object):
    """Implements storage for a tracked path.

    Data is stored in the VFS in a directory corresponding to the tracked
    path.

    Index data is stored in an ``index`` file using CBOR.

    Fulltext data is stored in files having names of the node.
    """

    _flagserrorclass = simplestoreerror

    def __init__(self, svfs, path):
        self._svfs = svfs
        self._path = path

        self._storepath = b'/'.join([b'data', path])
        self._indexpath = b'/'.join([self._storepath, b'index'])

        indexdata = self._svfs.tryread(self._indexpath)
        if indexdata:
            indexdata = cborutil.decodeall(indexdata)

        self._indexdata = indexdata or []
        self._indexbynode = {}
        self._indexbyrev = {}
        self._index = []
        self._refreshindex()

        self._flagprocessors = dict(flagutil.flagprocessors)

    def _refreshindex(self):
        self._indexbynode.clear()
        self._indexbyrev.clear()
        self._index = []

        for i, entry in enumerate(self._indexdata):
            self._indexbynode[entry[b'node']] = entry
            self._indexbyrev[i] = entry

        self._indexbynode[nullid] = {
            b'node': nullid,
            b'p1': nullid,
            b'p2': nullid,
            b'linkrev': nullrev,
            b'flags': 0,
        }

        self._indexbyrev[nullrev] = {
            b'node': nullid,
            b'p1': nullid,
            b'p2': nullid,
            b'linkrev': nullrev,
            b'flags': 0,
        }

        for i, entry in enumerate(self._indexdata):
            p1rev, p2rev = self.parentrevs(self.rev(entry[b'node']))

            # start, length, rawsize, chainbase, linkrev, p1, p2, node
            self._index.append(
                (0, 0, 0, -1, entry[b'linkrev'], p1rev, p2rev, entry[b'node'])
            )

        self._index.append((0, 0, 0, -1, -1, -1, -1, nullid))

    def __len__(self):
        return len(self._indexdata)

    def __iter__(self):
        return iter(range(len(self)))

    def revs(self, start=0, stop=None):
        step = 1
        if stop is not None:
            if start > stop:
                step = -1

            stop += step
        else:
            stop = len(self)

        return range(start, stop, step)

    def parents(self, node):
        validatenode(node)

        if node not in self._indexbynode:
            raise KeyError('unknown node')

        entry = self._indexbynode[node]

        return entry[b'p1'], entry[b'p2']

    def parentrevs(self, rev):
        p1, p2 = self.parents(self._indexbyrev[rev][b'node'])
        return self.rev(p1), self.rev(p2)

    def rev(self, node):
        validatenode(node)

        try:
            self._indexbynode[node]
        except KeyError:
            raise error.LookupError(node, self._indexpath, _('no node'))

        for rev, entry in self._indexbyrev.items():
            if entry[b'node'] == node:
                return rev

-        raise error.ProgrammingError('this should not occur')
+        raise error.ProgrammingError(b'this should not occur')

    def node(self, rev):
        validaterev(rev)

        return self._indexbyrev[rev][b'node']

    def hasnode(self, node):
        validatenode(node)
        return node in self._indexbynode

    def censorrevision(self, tr, censornode, tombstone=b''):
        raise NotImplementedError('TODO')

    def lookup(self, node):
        if isinstance(node, int):
            return self.node(node)

        if len(node) == 20:
            self.rev(node)
            return node

        try:
            rev = int(node)
            if '%d' % rev != node:
                raise ValueError

            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError

            return self.node(rev)
        except (ValueError, OverflowError):
            pass

        if len(node) == 40:
            try:
                rawnode = bin(node)
                self.rev(rawnode)
                return rawnode
            except TypeError:
                pass

        raise error.LookupError(node, self._path, _('invalid lookup input'))

    def linkrev(self, rev):
        validaterev(rev)

        return self._indexbyrev[rev][b'linkrev']

    def _flags(self, rev):
        validaterev(rev)

        return self._indexbyrev[rev][b'flags']

    def _candelta(self, baserev, rev):
        validaterev(baserev)
        validaterev(rev)

        if (self._flags(baserev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS) or (
            self._flags(rev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS
        ):
            return False

        return True

    def checkhash(self, text, node, p1=None, p2=None, rev=None):
        if p1 is None and p2 is None:
            p1, p2 = self.parents(node)
        if node != storageutil.hashrevisionsha1(text, p1, p2):
            raise simplestoreerror(
                _("integrity check failed on %s") % self._path
            )

    def revision(self, nodeorrev, raw=False):
        if isinstance(nodeorrev, int):
            node = self.node(nodeorrev)
        else:
            node = nodeorrev
            validatenode(node)

        if node == nullid:
            return b''

        rev = self.rev(node)
        flags = self._flags(rev)

        path = b'/'.join([self._storepath, hex(node)])
        rawtext = self._svfs.read(path)

        if raw:
            validatehash = flagutil.processflagsraw(self, rawtext, flags)
            text = rawtext
        else:
            r = flagutil.processflagsread(self, rawtext, flags)
            text, validatehash, sidedata = r
        if validatehash:
            self.checkhash(text, node, rev=rev)

        return text

    def rawdata(self, nodeorrev):
        return self.revision(raw=True)

    def read(self, node):
        validatenode(node)

        revision = self.revision(node)

        if not revision.startswith(b'\1\n'):
            return revision

        start = revision.index(b'\1\n', 2)
        return revision[start + 2 :]

    def renamed(self, node):
        validatenode(node)

        if self.parents(node)[0] != nullid:
            return False

        fulltext = self.revision(node)
        m = storageutil.parsemeta(fulltext)[0]

        if m and 'copy' in m:
            return m['copy'], bin(m['copyrev'])

        return False

    def cmp(self, node, text):
        validatenode(node)

        t = text

        if text.startswith(b'\1\n'):
            t = b'\1\n\1\n' + text

        p1, p2 = self.parents(node)

        if storageutil.hashrevisionsha1(t, p1, p2) == node:
            return False

        if self.iscensored(self.rev(node)):
            return text != b''

        if self.renamed(node):
            t2 = self.read(node)
            return t2 != text

        return True

    def size(self, rev):
        validaterev(rev)

        node = self._indexbyrev[rev][b'node']

        if self.renamed(node):
            return len(self.read(node))

        if self.iscensored(rev):
            return 0

        return len(self.revision(node))

    def iscensored(self, rev):
        validaterev(rev)

        return self._flags(rev) & repository.REVISION_FLAG_CENSORED

    def commonancestorsheads(self, a, b):
        validatenode(a)
        validatenode(b)

        a = self.rev(a)
        b = self.rev(b)

        ancestors = ancestor.commonancestorsheads(self.parentrevs, a, b)
        return pycompat.maplist(self.node, ancestors)

    def descendants(self, revs):
        # This is a copy of revlog.descendants()
        first = min(revs)
        if first == nullrev:
            for i in self:
                yield i
            return

        seen = set(revs)
        for i in self.revs(start=first + 1):
            for x in self.parentrevs(i):
                if x != nullrev and x in seen:
                    seen.add(i)
                    yield i
                    break

    # Required by verify.
    def files(self):
        entries = self._svfs.listdir(self._storepath)

        # Strip out undo.backup.* files created as part of transaction
        # recording.
        entries = [f for f in entries if not f.startswith('undo.backup.')]

        return [b'/'.join((self._storepath, f)) for f in entries]

    def storageinfo(
        self,
        exclusivefiles=False,
        sharedfiles=False,
        revisionscount=False,
        trackedsize=False,
        storedsize=False,
    ):
        # TODO do a real implementation of this
        return {
            'exclusivefiles': [],
            'sharedfiles': [],
            'revisionscount': len(self),
            'trackedsize': 0,
            'storedsize': None,
        }

    def verifyintegrity(self, state):
        state['skipread'] = set()
        for rev in self:
            node = self.node(rev)
            try:
                self.revision(node)
            except Exception as e:
                yield simplefilestoreproblem(
                    error='unpacking %s: %s' % (node, e), node=node
                )
                state['skipread'].add(node)

    def emitrevisions(
        self,
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
        deltamode=repository.CG_DELTAMODE_STD,
    ):
        # TODO this will probably break on some ordering options.
        nodes = [n for n in nodes if n != nullid]
        if not nodes:
            return
        for delta in storageutil.emitrevisions(
            self,
            nodes,
            nodesorder,
            simplestorerevisiondelta,
            revisiondata=revisiondata,
            assumehaveparentrevisions=assumehaveparentrevisions,
            deltamode=deltamode,
        ):
            yield delta

    def add(self, text, meta, transaction, linkrev, p1, p2):
        if meta or text.startswith(b'\1\n'):
            text = storageutil.packmeta(meta, text)

        return self.addrevision(text, transaction, linkrev, p1, p2)

    def addrevision(
        self,
        text,
        transaction,
        linkrev,
        p1,
        p2,
        node=None,
        flags=revlog.REVIDX_DEFAULT_FLAGS,
        cachedelta=None,
    ):
        validatenode(p1)
        validatenode(p2)

        if flags:
            node = node or storageutil.hashrevisionsha1(text, p1, p2)

        rawtext, validatehash = flagutil.processflagswrite(self, text, flags)

        node = node or storageutil.hashrevisionsha1(text, p1, p2)

        if node in self._indexbynode:
            return node

        if validatehash:
            self.checkhash(rawtext, node, p1=p1, p2=p2)

        return self._addrawrevision(
            node, rawtext, transaction, linkrev, p1, p2, flags
        )

    def _addrawrevision(self, node, rawtext, transaction, link, p1, p2, flags):
        transaction.addbackup(self._indexpath)

        path = b'/'.join([self._storepath, hex(node)])

        self._svfs.write(path, rawtext)

        self._indexdata.append(
            {
                b'node': node,
                b'p1': p1,
                b'p2': p2,
                b'linkrev': link,
                b'flags': flags,
            }
        )

        self._reflectindexupdate()

        return node

    def _reflectindexupdate(self):
        self._refreshindex()
        self._svfs.write(
            self._indexpath, ''.join(cborutil.streamencode(self._indexdata))
        )

    def addgroup(
        self,
        deltas,
        linkmapper,
        transaction,
        addrevisioncb=None,
        maybemissingparents=False,
    ):
        if maybemissingparents:
            raise error.Abort(
                _('simple store does not support missing parents ' 'write mode')
            )

        nodes = []

        transaction.addbackup(self._indexpath)

        for node, p1, p2, linknode, deltabase, delta, flags in deltas:
            linkrev = linkmapper(linknode)
            flags = flags or revlog.REVIDX_DEFAULT_FLAGS

            nodes.append(node)

            if node in self._indexbynode:
                continue

            # Need to resolve the fulltext from the delta base.
            if deltabase == nullid:
                text = mdiff.patch(b'', delta)
            else:
                text = mdiff.patch(self.revision(deltabase), delta)

            self._addrawrevision(
                node, text, transaction, linkrev, p1, p2, flags
            )

            if addrevisioncb:
                addrevisioncb(self, node)
        return nodes

    def _headrevs(self):
        # Assume all revisions are heads by default.
        revishead = {rev: True for rev in self._indexbyrev}

        for rev, entry in self._indexbyrev.items():
            # Unset head flag for all seen parents.
            revishead[self.rev(entry[b'p1'])] = False
            revishead[self.rev(entry[b'p2'])] = False

        return [rev for rev, ishead in sorted(revishead.items()) if ishead]

    def heads(self, start=None, stop=None):
        # This is copied from revlog.py.
        if start is None and stop is None:
            if not len(self):
                return [nullid]
            return [self.node(r) for r in self._headrevs()]

        if start is None:
            start = nullid
        if stop is None:
            stop = []
        stoprevs = set([self.rev(n) for n in stop])
        startrev = self.rev(start)
        reachable = {startrev}
        heads = {startrev}

        parentrevs = self.parentrevs
        for r in self.revs(start=startrev + 1):
            for p in parentrevs(r):
                if p in reachable:
                    if r not in stoprevs:
                        reachable.add(r)
                        heads.add(r)
                    if p in heads and p not in stoprevs:
                        heads.remove(p)

        return [self.node(r) for r in heads]

    def children(self, node):
        validatenode(node)

        # This is a copy of revlog.children().
        c = []
        p = self.rev(node)
        for r in self.revs(start=p + 1):
            prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
            if prevs:
                for pr in prevs:
                    if pr == p:
                        c.append(self.node(r))
            elif p == nullrev:
                c.append(self.node(r))
        return c

    def getstrippoint(self, minlink):
        return storageutil.resolvestripinfo(
            minlink,
            len(self) - 1,
            self._headrevs(),
            self.linkrev,
            self.parentrevs,
        )

    def strip(self, minlink, transaction):
        if not len(self):
            return

        rev, _ignored = self.getstrippoint(minlink)
        if rev == len(self):
            return

        # Purge index data starting at the requested revision.
        self._indexdata[rev:] = []
        self._reflectindexupdate()


def issimplestorefile(f, kind, st):
    if kind != stat.S_IFREG:
        return False

    if store.isrevlog(f, kind, st):
        return False

    # Ignore transaction undo files.
    if f.startswith('undo.'):
        return False

    # Otherwise assume it belongs to the simple store.
    return True


class simplestore(store.encodedstore):
    def datafiles(self):
        for x in super(simplestore, self).datafiles():
            yield x

        # Supplement with non-revlog files.
        extrafiles = self._walk('data', True, filefilter=issimplestorefile)

        for unencoded, encoded, size in extrafiles:
            try:
                unencoded = store.decodefilename(unencoded)
            except KeyError:
                unencoded = None

            yield unencoded, encoded, size


def reposetup(ui, repo):
    if not repo.local():
        return

    if isinstance(repo, bundlerepo.bundlerepository):
        raise error.Abort(_('cannot use simple store with bundlerepo'))

    class simplestorerepo(repo.__class__):
        def file(self, f):
            return filestorage(self.svfs, f)

    repo.__class__ = simplestorerepo


def featuresetup(ui, supported):
    supported.add(REQUIREMENT)


def newreporequirements(orig, ui, createopts):
    """Modifies default requirements for new repos to use the simple store."""
    requirements = orig(ui, createopts)

    # These requirements are only used to affect creation of the store
    # object. We have our own store. So we can remove them.
    # TODO do this once we feel like taking the test hit.
    # if 'fncache' in requirements:
    #    requirements.remove('fncache')
    # if 'dotencode' in requirements:
    #    requirements.remove('dotencode')

    requirements.add(REQUIREMENT)

    return requirements


def makestore(orig, requirements, path, vfstype):
    if REQUIREMENT not in requirements:
        return orig(requirements, path, vfstype)

    return simplestore(path, vfstype)


def verifierinit(orig, self, *args, **kwargs):
    orig(self, *args, **kwargs)

    # We don't care that files in the store don't align with what is
    # advertised. So suppress these warnings.
    self.warnorphanstorefiles = False


def extsetup(ui):
    localrepo.featuresetupfuncs.add(featuresetup)

    extensions.wrapfunction(
        localrepo, 'newreporequirements', newreporequirements
    )
    extensions.wrapfunction(localrepo, 'makestore', makestore)
    extensions.wrapfunction(verify.verifier, '__init__', verifierinit)
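For orientation, the filestorage docstring above describes the on-disk layout this extension swaps in: each tracked path gets a directory under data/ holding a CBOR-encoded index file plus one file per revision fulltext, named by the hex node. A rough sketch of reading that layout directly, mirroring what __init__ and revision() do; readindex and readfulltext are hypothetical names, and svfs stands in for any vfs-like object with read/tryread:

from mercurial.node import hex
from mercurial.utils import cborutil

def readindex(svfs, path):
    # The index is a stream of CBOR maps at data/<tracked path>/index.
    data = svfs.tryread(b'/'.join([b'data', path, b'index']))
    return cborutil.decodeall(data) if data else []

def readfulltext(svfs, path, node):
    # Fulltexts live at data/<tracked path>/<hex node>.
    return svfs.read(b'/'.join([b'data', path, hex(node)]))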