##// END OF EJS Templates
simplestorerepo: stop using `_processflags` directly...
marmoute -
r43149:38c62272 default
parent child Browse files
Show More
@@ -1,678 +1,682 b''
1 # simplestorerepo.py - Extension that swaps in alternate repository storage.
1 # simplestorerepo.py - Extension that swaps in alternate repository storage.
2 #
2 #
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 # To use this with the test suite:
8 # To use this with the test suite:
9 #
9 #
10 # $ HGREPOFEATURES="simplestore" ./run-tests.py \
10 # $ HGREPOFEATURES="simplestore" ./run-tests.py \
11 # --extra-config-opt extensions.simplestore=`pwd`/simplestorerepo.py
11 # --extra-config-opt extensions.simplestore=`pwd`/simplestorerepo.py
12
12
13 from __future__ import absolute_import
13 from __future__ import absolute_import
14
14
15 import stat
15 import stat
16
16
17 from mercurial.i18n import _
17 from mercurial.i18n import _
18 from mercurial.node import (
18 from mercurial.node import (
19 bin,
19 bin,
20 hex,
20 hex,
21 nullid,
21 nullid,
22 nullrev,
22 nullrev,
23 )
23 )
24 from mercurial.thirdparty import (
24 from mercurial.thirdparty import (
25 attr,
25 attr,
26 )
26 )
27 from mercurial import (
27 from mercurial import (
28 ancestor,
28 ancestor,
29 bundlerepo,
29 bundlerepo,
30 error,
30 error,
31 extensions,
31 extensions,
32 localrepo,
32 localrepo,
33 mdiff,
33 mdiff,
34 pycompat,
34 pycompat,
35 revlog,
35 revlog,
36 store,
36 store,
37 verify,
37 verify,
38 )
38 )
39 from mercurial.interfaces import (
39 from mercurial.interfaces import (
40 repository,
40 repository,
41 util as interfaceutil,
41 util as interfaceutil,
42 )
42 )
43 from mercurial.utils import (
43 from mercurial.utils import (
44 cborutil,
44 cborutil,
45 storageutil,
45 storageutil,
46 )
46 )
47 from mercurial.revlogutils import (
47 from mercurial.revlogutils import (
48 flagutil,
48 flagutil,
49 )
49 )
50
50
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

# Requirement string recorded in .hg/requires for repos using the simple
# store; gates activation of this storage backend.
REQUIREMENT = 'testonly-simplestore'
58
58
def validatenode(node):
    """Validate that ``node`` is a 20 byte binary node, not an integer rev."""
    # Callers sometimes confuse revision numbers (ints) with nodes.
    if isinstance(node, int):
        raise ValueError('expected node; got int')

    # Binary SHA-1 nodes are exactly 20 bytes.
    if len(node) != 20:
        raise ValueError('expected 20 byte node')
65
65
def validaterev(rev):
    """Raise ValueError unless ``rev`` is an integer revision number."""
    if isinstance(rev, int):
        return
    raise ValueError('expected int')
69
69
class simplestoreerror(error.StorageError):
    """Storage error raised by the simple store (used as the flag-processing
    error class and for integrity check failures)."""
    pass
72
72
@interfaceutil.implementer(repository.irevisiondelta)
@attr.s(slots=True)
class simplestorerevisiondelta(object):
    """Attribute bag implementing ``irevisiondelta`` for the simple store;
    instances are produced by ``storageutil.emitrevisions()``."""
    node = attr.ib()
    p1node = attr.ib()
    p2node = attr.ib()
    basenode = attr.ib()
    flags = attr.ib()
    baserevisionsize = attr.ib()
    revision = attr.ib()
    delta = attr.ib()
    linknode = attr.ib(default=None)
85
85
@interfaceutil.implementer(repository.iverifyproblem)
@attr.s(frozen=True)
class simplefilestoreproblem(object):
    """A problem reported during ``verifyintegrity()`` (``iverifyproblem``)."""
    warning = attr.ib(default=None)
    error = attr.ib(default=None)
    node = attr.ib(default=None)
92
92
@interfaceutil.implementer(repository.ifilestorage)
class filestorage(flagutil.flagprocessorsmixin):
    """Implements storage for a tracked path.

    Data is stored in the VFS in a directory corresponding to the tracked
    path.

    Index data is stored in an ``index`` file using CBOR.

    Fulltext data is stored in files having names of the node.
    """

    # Errors raised during flag processing use the simple store's own class.
    _flagserrorclass = simplestoreerror

    def __init__(self, svfs, path):
        self._svfs = svfs
        self._path = path

        self._storepath = b'/'.join([b'data', path])
        self._indexpath = b'/'.join([self._storepath, b'index'])

        indexdata = self._svfs.tryread(self._indexpath)
        if indexdata:
            indexdata = cborutil.decodeall(indexdata)

        self._indexdata = indexdata or []
        self._indexbynode = {}
        self._indexbyrev = {}
        self._index = []
        self._refreshindex()

        self._flagprocessors = dict(flagutil.flagprocessors)

    def _refreshindex(self):
        """Rebuild the in-memory index mappings from ``self._indexdata``."""
        self._indexbynode.clear()
        self._indexbyrev.clear()
        self._index = []

        for i, entry in enumerate(self._indexdata):
            self._indexbynode[entry[b'node']] = entry
            self._indexbyrev[i] = entry

        # Register sentinel entries so lookups of the null node/rev succeed.
        self._indexbynode[nullid] = {
            b'node': nullid,
            b'p1': nullid,
            b'p2': nullid,
            b'linkrev': nullrev,
            b'flags': 0,
        }

        self._indexbyrev[nullrev] = {
            b'node': nullid,
            b'p1': nullid,
            b'p2': nullid,
            b'linkrev': nullrev,
            b'flags': 0,
        }

        for i, entry in enumerate(self._indexdata):
            p1rev, p2rev = self.parentrevs(self.rev(entry[b'node']))

            # start, length, rawsize, chainbase, linkrev, p1, p2, node
            self._index.append((0, 0, 0, -1, entry[b'linkrev'], p1rev, p2rev,
                                entry[b'node']))

        self._index.append((0, 0, 0, -1, -1, -1, -1, nullid))

    def __len__(self):
        return len(self._indexdata)

    def __iter__(self):
        return iter(range(len(self)))

    def revs(self, start=0, stop=None):
        """Iterate revision numbers, optionally in reverse when start > stop."""
        step = 1
        if stop is not None:
            if start > stop:
                step = -1

            # ``stop`` is inclusive in the storage API; range() excludes it.
            stop += step
        else:
            stop = len(self)

        return range(start, stop, step)

    def parents(self, node):
        """Return the (p1, p2) parent nodes of ``node``."""
        validatenode(node)

        if node not in self._indexbynode:
            raise KeyError('unknown node')

        entry = self._indexbynode[node]

        return entry[b'p1'], entry[b'p2']

    def parentrevs(self, rev):
        p1, p2 = self.parents(self._indexbyrev[rev][b'node'])
        return self.rev(p1), self.rev(p2)

    def rev(self, node):
        """Return the revision number for ``node``; raise LookupError if
        unknown."""
        validatenode(node)

        try:
            self._indexbynode[node]
        except KeyError:
            raise error.LookupError(node, self._indexpath, _('no node'))

        # NOTE: linear scan; acceptable for a test-only storage backend.
        for rev, entry in self._indexbyrev.items():
            if entry[b'node'] == node:
                return rev

        raise error.ProgrammingError('this should not occur')

    def node(self, rev):
        validaterev(rev)

        return self._indexbyrev[rev][b'node']

    def hasnode(self, node):
        validatenode(node)
        return node in self._indexbynode

    def censorrevision(self, tr, censornode, tombstone=b''):
        raise NotImplementedError('TODO')

    def lookup(self, node):
        """Resolve an int rev, binary node, decimal string, or hex string to
        a binary node."""
        if isinstance(node, int):
            return self.node(node)

        if len(node) == 20:
            # Validate existence; rev() raises LookupError if unknown.
            self.rev(node)
            return node

        try:
            rev = int(node)
            # Reject strings like '01' that aren't canonical int spellings.
            if '%d' % rev != node:
                raise ValueError

            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError

            return self.node(rev)
        except (ValueError, OverflowError):
            pass

        if len(node) == 40:
            try:
                rawnode = bin(node)
                self.rev(rawnode)
                return rawnode
            except TypeError:
                pass

        raise error.LookupError(node, self._path, _('invalid lookup input'))

    def linkrev(self, rev):
        validaterev(rev)

        return self._indexbyrev[rev][b'linkrev']

    def _flags(self, rev):
        validaterev(rev)

        return self._indexbyrev[rev][b'flags']

    def _candelta(self, baserev, rev):
        """Return False when either revision carries flags that change the
        rawtext, which makes delta computation unsafe."""
        validaterev(baserev)
        validaterev(rev)

        if ((self._flags(baserev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)
            or (self._flags(rev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)):
            return False

        return True

    def checkhash(self, text, node, p1=None, p2=None, rev=None):
        """Verify ``text`` hashes to ``node`` given its parents."""
        if p1 is None and p2 is None:
            p1, p2 = self.parents(node)
        if node != storageutil.hashrevisionsha1(text, p1, p2):
            raise simplestoreerror(_("integrity check failed on %s") %
                                   self._path)

    def revision(self, nodeorrev, raw=False):
        """Return the fulltext for a node or rev.

        When ``raw`` is True the stored rawtext is returned without read-time
        flag transforms applied.
        """
        if isinstance(nodeorrev, int):
            node = self.node(nodeorrev)
        else:
            node = nodeorrev
        validatenode(node)

        if node == nullid:
            return b''

        rev = self.rev(node)
        flags = self._flags(rev)

        path = b'/'.join([self._storepath, hex(node)])
        rawtext = self._svfs.read(path)

        if raw:
            validatehash = self._processflagsraw(rawtext, flags)
            text = rawtext
        else:
            text, validatehash = self._processflagsread(rawtext, flags)
        if validatehash:
            self.checkhash(text, node, rev=rev)

        return text

    def rawdata(self, nodeorrev):
        """Return the raw stored text for a node or rev.

        Fix: ``nodeorrev`` must be forwarded to ``revision()``; the previous
        ``self.revision(raw=True)`` omitted it, raising TypeError on every
        call.
        """
        return self.revision(nodeorrev, raw=True)

    def read(self, node):
        """Return the file content for ``node`` with copy metadata stripped."""
        validatenode(node)

        revision = self.revision(node)

        # Copy metadata, if present, is delimited by leading/trailing '\1\n'.
        if not revision.startswith(b'\1\n'):
            return revision

        start = revision.index(b'\1\n', 2)
        return revision[start + 2:]

    def renamed(self, node):
        """Return (copied-from path, source node) if ``node`` is a copy,
        else False."""
        validatenode(node)

        # Copies only ever appear on revisions without a first parent.
        if self.parents(node)[0] != nullid:
            return False

        fulltext = self.revision(node)
        m = storageutil.parsemeta(fulltext)[0]

        if m and 'copy' in m:
            return m['copy'], bin(m['copyrev'])

        return False

    def cmp(self, node, text):
        """Return True if ``text`` differs from the stored revision."""
        validatenode(node)

        t = text

        # Content starting with the metadata delimiter must be escaped the
        # way the storage layer would before hashing.
        if text.startswith(b'\1\n'):
            t = b'\1\n\1\n' + text

        p1, p2 = self.parents(node)

        if storageutil.hashrevisionsha1(t, p1, p2) == node:
            return False

        if self.iscensored(self.rev(node)):
            return text != b''

        if self.renamed(node):
            t2 = self.read(node)
            return t2 != text

        return True

    def size(self, rev):
        """Return the length of the file content at ``rev``."""
        validaterev(rev)

        node = self._indexbyrev[rev][b'node']

        if self.renamed(node):
            return len(self.read(node))

        if self.iscensored(rev):
            return 0

        return len(self.revision(node))

    def iscensored(self, rev):
        validaterev(rev)

        return self._flags(rev) & repository.REVISION_FLAG_CENSORED

    def commonancestorsheads(self, a, b):
        """Return the heads of the common ancestors of nodes ``a`` and ``b``."""
        validatenode(a)
        validatenode(b)

        a = self.rev(a)
        b = self.rev(b)

        ancestors = ancestor.commonancestorsheads(self.parentrevs, a, b)
        return pycompat.maplist(self.node, ancestors)

    def descendants(self, revs):
        # This is a copy of revlog.descendants()
        first = min(revs)
        if first == nullrev:
            for i in self:
                yield i
            return

        seen = set(revs)
        for i in self.revs(start=first + 1):
            for x in self.parentrevs(i):
                if x != nullrev and x in seen:
                    seen.add(i)
                    yield i
                    break

    # Required by verify.
    def files(self):
        entries = self._svfs.listdir(self._storepath)

        # Strip out undo.backup.* files created as part of transaction
        # recording.
        entries = [f for f in entries if not f.startswith('undo.backup.')]

        return [b'/'.join((self._storepath, f)) for f in entries]

    def storageinfo(self, exclusivefiles=False, sharedfiles=False,
                    revisionscount=False, trackedsize=False,
                    storedsize=False):
        # TODO do a real implementation of this
        return {
            'exclusivefiles': [],
            'sharedfiles': [],
            'revisionscount': len(self),
            'trackedsize': 0,
            'storedsize': None,
        }

    def verifyintegrity(self, state):
        """Yield simplefilestoreproblem for each revision that fails to
        unpack; records failures in ``state['skipread']``."""
        state['skipread'] = set()
        for rev in self:
            node = self.node(rev)
            try:
                self.revision(node)
            except Exception as e:
                yield simplefilestoreproblem(
                    error='unpacking %s: %s' % (node, e),
                    node=node)
                state['skipread'].add(node)

    def emitrevisions(self, nodes, nodesorder=None, revisiondata=False,
                      assumehaveparentrevisions=False,
                      deltamode=repository.CG_DELTAMODE_STD):
        # TODO this will probably break on some ordering options.
        nodes = [n for n in nodes if n != nullid]
        if not nodes:
            return
        for delta in storageutil.emitrevisions(
            self, nodes, nodesorder, simplestorerevisiondelta,
            revisiondata=revisiondata,
            assumehaveparentrevisions=assumehaveparentrevisions,
            deltamode=deltamode):
            yield delta

    def add(self, text, meta, transaction, linkrev, p1, p2):
        """Add a revision, packing copy ``meta`` into the stored text."""
        if meta or text.startswith(b'\1\n'):
            text = storageutil.packmeta(meta, text)

        return self.addrevision(text, transaction, linkrev, p1, p2)

    def addrevision(self, text, transaction, linkrev, p1, p2, node=None,
                    flags=revlog.REVIDX_DEFAULT_FLAGS, cachedelta=None):
        """Add ``text`` as a new revision, applying write-time flag
        transforms; returns the node."""
        validatenode(p1)
        validatenode(p2)

        if flags:
            node = node or storageutil.hashrevisionsha1(text, p1, p2)

        rawtext, validatehash = self._processflagswrite(text, flags)

        node = node or storageutil.hashrevisionsha1(text, p1, p2)

        # Adding an already-known node is a no-op.
        if node in self._indexbynode:
            return node

        if validatehash:
            self.checkhash(rawtext, node, p1=p1, p2=p2)

        return self._addrawrevision(node, rawtext, transaction, linkrev, p1, p2,
                                    flags)

    def _addrawrevision(self, node, rawtext, transaction, link, p1, p2, flags):
        """Write ``rawtext`` to the store and append an index entry."""
        transaction.addbackup(self._indexpath)

        path = b'/'.join([self._storepath, hex(node)])

        self._svfs.write(path, rawtext)

        self._indexdata.append({
            b'node': node,
            b'p1': p1,
            b'p2': p2,
            b'linkrev': link,
            b'flags': flags,
        })

        self._reflectindexupdate()

        return node

    def _reflectindexupdate(self):
        """Refresh in-memory mappings and persist the index as CBOR."""
        self._refreshindex()
        self._svfs.write(self._indexpath,
                         ''.join(cborutil.streamencode(self._indexdata)))

    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None,
                 maybemissingparents=False):
        """Apply a group of deltas, returning the list of nodes involved."""
        if maybemissingparents:
            raise error.Abort(_('simple store does not support missing parents '
                                'write mode'))

        nodes = []

        transaction.addbackup(self._indexpath)

        for node, p1, p2, linknode, deltabase, delta, flags in deltas:
            linkrev = linkmapper(linknode)
            flags = flags or revlog.REVIDX_DEFAULT_FLAGS

            nodes.append(node)

            if node in self._indexbynode:
                continue

            # Need to resolve the fulltext from the delta base.
            if deltabase == nullid:
                text = mdiff.patch(b'', delta)
            else:
                text = mdiff.patch(self.revision(deltabase), delta)

            self._addrawrevision(node, text, transaction, linkrev, p1, p2,
                                 flags)

            if addrevisioncb:
                addrevisioncb(self, node)
        return nodes

    def _headrevs(self):
        # Assume all revisions are heads by default.
        revishead = {rev: True for rev in self._indexbyrev}

        for rev, entry in self._indexbyrev.items():
            # Unset head flag for all seen parents.
            revishead[self.rev(entry[b'p1'])] = False
            revishead[self.rev(entry[b'p2'])] = False

        return [rev for rev, ishead in sorted(revishead.items())
                if ishead]

    def heads(self, start=None, stop=None):
        # This is copied from revlog.py.
        if start is None and stop is None:
            if not len(self):
                return [nullid]
            return [self.node(r) for r in self._headrevs()]

        if start is None:
            start = nullid
        if stop is None:
            stop = []
        stoprevs = set([self.rev(n) for n in stop])
        startrev = self.rev(start)
        reachable = {startrev}
        heads = {startrev}

        parentrevs = self.parentrevs
        for r in self.revs(start=startrev + 1):
            for p in parentrevs(r):
                if p in reachable:
                    if r not in stoprevs:
                        reachable.add(r)
                        heads.add(r)
                    if p in heads and p not in stoprevs:
                        heads.remove(p)

        return [self.node(r) for r in heads]

    def children(self, node):
        validatenode(node)

        # This is a copy of revlog.children().
        c = []
        p = self.rev(node)
        for r in self.revs(start=p + 1):
            prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
            if prevs:
                for pr in prevs:
                    if pr == p:
                        c.append(self.node(r))
            elif p == nullrev:
                c.append(self.node(r))
        return c

    def getstrippoint(self, minlink):
        return storageutil.resolvestripinfo(
            minlink, len(self) - 1, self._headrevs(), self.linkrev,
            self.parentrevs)

    def strip(self, minlink, transaction):
        """Remove revisions whose linkrev is >= the strip point for
        ``minlink``."""
        if not len(self):
            return

        rev, _ignored = self.getstrippoint(minlink)
        if rev == len(self):
            return

        # Purge index data starting at the requested revision.
        self._indexdata[rev:] = []
        self._reflectindexupdate()
596
600
def issimplestorefile(f, kind, st):
    """Return whether store file ``f`` belongs to the simple store."""
    # Regular, non-revlog files that aren't transaction undo leftovers are
    # assumed to be simple store data.
    return (kind == stat.S_IFREG
            and not store.isrevlog(f, kind, st)
            and not f.startswith('undo.'))
610
614
class simplestore(store.encodedstore):
    """Encoded store variant that also reports the simple store's own
    (non-revlog) data files."""

    def datafiles(self):
        # First emit everything the encoded store already knows about.
        for entry in super(simplestore, self).datafiles():
            yield entry

        # Then supplement with non-revlog files from the simple store.
        extrafiles = self._walk('data', True, filefilter=issimplestorefile)

        for unencoded, encoded, size in extrafiles:
            try:
                unencoded = store.decodefilename(unencoded)
            except KeyError:
                # Name isn't a valid encoded filename; report it as unknown.
                unencoded = None

            yield unencoded, encoded, size
626
630
def reposetup(ui, repo):
    """Swap file storage on local repositories for the simple store."""
    # Only local repos have a store to replace.
    if not repo.local():
        return

    if isinstance(repo, bundlerepo.bundlerepository):
        raise error.Abort(_('cannot use simple store with bundlerepo'))

    class simplestorerepo(repo.__class__):
        def file(self, f):
            # Route all per-file storage through the simple store backend.
            return filestorage(self.svfs, f)

    repo.__class__ = simplestorerepo
639
643
def featuresetup(ui, supported):
    """Declare the simple store requirement as supported by this repo."""
    supported.add(REQUIREMENT)
642
646
def newreporequirements(orig, ui, createopts):
    """Modifies default requirements for new repos to use the simple store."""
    requirements = orig(ui, createopts)

    # These requirements are only used to affect creation of the store
    # object. We have our own store. So we can remove them.
    # TODO do this once we feel like taking the test hit.
    #if 'fncache' in requirements:
    #    requirements.remove('fncache')
    #if 'dotencode' in requirements:
    #    requirements.remove('dotencode')

    # Mark the repo so makestore() and featuresetup() activate the backend.
    requirements.add(REQUIREMENT)

    return requirements
658
662
def makestore(orig, requirements, path, vfstype):
    """Create a simplestore when the repo declares our requirement;
    otherwise defer to the original store factory."""
    if REQUIREMENT in requirements:
        return simplestore(path, vfstype)

    return orig(requirements, path, vfstype)
664
668
def verifierinit(orig, self, *args, **kwargs):
    """Wrap verify.verifier.__init__ to quiet orphan store file warnings."""
    orig(self, *args, **kwargs)

    # We don't care that files in the store don't align with what is
    # advertised. So suppress these warnings.
    self.warnorphanstorefiles = False
671
675
def extsetup(ui):
    """Install the wrappers that activate the simple store."""
    localrepo.featuresetupfuncs.add(featuresetup)

    extensions.wrapfunction(localrepo, 'newreporequirements',
                            newreporequirements)
    extensions.wrapfunction(localrepo, 'makestore', makestore)
    extensions.wrapfunction(verify.verifier, '__init__', verifierinit)
General Comments 0
You need to be logged in to leave comments. Login now