flagutil: use it in simplestorerepo...
marmoute
r43143:705428da default
@@ -1,713 +1,678 @@
1 # simplestorerepo.py - Extension that swaps in alternate repository storage.
1 # simplestorerepo.py - Extension that swaps in alternate repository storage.
2 #
2 #
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 # To use this with the test suite:
8 # To use this with the test suite:
9 #
9 #
10 # $ HGREPOFEATURES="simplestore" ./run-tests.py \
10 # $ HGREPOFEATURES="simplestore" ./run-tests.py \
11 # --extra-config-opt extensions.simplestore=`pwd`/simplestorerepo.py
11 # --extra-config-opt extensions.simplestore=`pwd`/simplestorerepo.py
12
12
13 from __future__ import absolute_import
13 from __future__ import absolute_import
14
14
15 import stat
15 import stat
16
16
17 from mercurial.i18n import _
17 from mercurial.i18n import _
18 from mercurial.node import (
18 from mercurial.node import (
19 bin,
19 bin,
20 hex,
20 hex,
21 nullid,
21 nullid,
22 nullrev,
22 nullrev,
23 )
23 )
24 from mercurial.thirdparty import (
24 from mercurial.thirdparty import (
25 attr,
25 attr,
26 )
26 )
27 from mercurial import (
27 from mercurial import (
28 ancestor,
28 ancestor,
29 bundlerepo,
29 bundlerepo,
30 error,
30 error,
31 extensions,
31 extensions,
32 localrepo,
32 localrepo,
33 mdiff,
33 mdiff,
34 pycompat,
34 pycompat,
35 revlog,
35 revlog,
36 store,
36 store,
37 verify,
37 verify,
38 )
38 )
39 from mercurial.interfaces import (
39 from mercurial.interfaces import (
40 repository,
40 repository,
41 util as interfaceutil,
41 util as interfaceutil,
42 )
42 )
43 from mercurial.utils import (
43 from mercurial.utils import (
44 cborutil,
44 cborutil,
45 storageutil,
45 storageutil,
46 )
46 )
47 from mercurial.revlogutils import (
47 from mercurial.revlogutils import (
48 flagutil,
48 flagutil,
49 )
49 )
50
50
51 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
51 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
52 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
52 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
53 # be specifying the version(s) of Mercurial they are tested with, or
53 # be specifying the version(s) of Mercurial they are tested with, or
54 # leave the attribute unspecified.
54 # leave the attribute unspecified.
55 testedwith = 'ships-with-hg-core'
55 testedwith = 'ships-with-hg-core'
56
56
57 REQUIREMENT = 'testonly-simplestore'
57 REQUIREMENT = 'testonly-simplestore'
58
58
59 def validatenode(node):
59 def validatenode(node):
60 if isinstance(node, int):
60 if isinstance(node, int):
61 raise ValueError('expected node; got int')
61 raise ValueError('expected node; got int')
62
62
63 if len(node) != 20:
63 if len(node) != 20:
64 raise ValueError('expected 20 byte node')
64 raise ValueError('expected 20 byte node')
65
65
66 def validaterev(rev):
66 def validaterev(rev):
67 if not isinstance(rev, int):
67 if not isinstance(rev, int):
68 raise ValueError('expected int')
68 raise ValueError('expected int')
69
69
70 class simplestoreerror(error.StorageError):
70 class simplestoreerror(error.StorageError):
71 pass
71 pass
72
72
73 @interfaceutil.implementer(repository.irevisiondelta)
73 @interfaceutil.implementer(repository.irevisiondelta)
74 @attr.s(slots=True)
74 @attr.s(slots=True)
75 class simplestorerevisiondelta(object):
75 class simplestorerevisiondelta(object):
76 node = attr.ib()
76 node = attr.ib()
77 p1node = attr.ib()
77 p1node = attr.ib()
78 p2node = attr.ib()
78 p2node = attr.ib()
79 basenode = attr.ib()
79 basenode = attr.ib()
80 flags = attr.ib()
80 flags = attr.ib()
81 baserevisionsize = attr.ib()
81 baserevisionsize = attr.ib()
82 revision = attr.ib()
82 revision = attr.ib()
83 delta = attr.ib()
83 delta = attr.ib()
84 linknode = attr.ib(default=None)
84 linknode = attr.ib(default=None)
85
85
86 @interfaceutil.implementer(repository.iverifyproblem)
86 @interfaceutil.implementer(repository.iverifyproblem)
87 @attr.s(frozen=True)
87 @attr.s(frozen=True)
88 class simplefilestoreproblem(object):
88 class simplefilestoreproblem(object):
89 warning = attr.ib(default=None)
89 warning = attr.ib(default=None)
90 error = attr.ib(default=None)
90 error = attr.ib(default=None)
91 node = attr.ib(default=None)
91 node = attr.ib(default=None)
92
92
93 @interfaceutil.implementer(repository.ifilestorage)
93 @interfaceutil.implementer(repository.ifilestorage)
94 class filestorage(object):
94 class filestorage(flagutil.flagprocessorsmixin):
95 """Implements storage for a tracked path.
95 """Implements storage for a tracked path.
96
96
97 Data is stored in the VFS in a directory corresponding to the tracked
97 Data is stored in the VFS in a directory corresponding to the tracked
98 path.
98 path.
99
99
100 Index data is stored in an ``index`` file using CBOR.
100 Index data is stored in an ``index`` file using CBOR.
101
101
102 Fulltext data is stored in files having names of the node.
102 Fulltext data is stored in files having names of the node.
103 """
103 """
104
104
105 _flagserrorclass = simplestoreerror
106
105 def __init__(self, svfs, path):
107 def __init__(self, svfs, path):
106 self._svfs = svfs
108 self._svfs = svfs
107 self._path = path
109 self._path = path
108
110
109 self._storepath = b'/'.join([b'data', path])
111 self._storepath = b'/'.join([b'data', path])
110 self._indexpath = b'/'.join([self._storepath, b'index'])
112 self._indexpath = b'/'.join([self._storepath, b'index'])
111
113
112 indexdata = self._svfs.tryread(self._indexpath)
114 indexdata = self._svfs.tryread(self._indexpath)
113 if indexdata:
115 if indexdata:
114 indexdata = cborutil.decodeall(indexdata)
116 indexdata = cborutil.decodeall(indexdata)
115
117
116 self._indexdata = indexdata or []
118 self._indexdata = indexdata or []
117 self._indexbynode = {}
119 self._indexbynode = {}
118 self._indexbyrev = {}
120 self._indexbyrev = {}
119 self._index = []
121 self._index = []
120 self._refreshindex()
122 self._refreshindex()
121
123
124 self._flagprocessors = dict(flagutil.flagprocessors)
125
122 def _refreshindex(self):
126 def _refreshindex(self):
123 self._indexbynode.clear()
127 self._indexbynode.clear()
124 self._indexbyrev.clear()
128 self._indexbyrev.clear()
125 self._index = []
129 self._index = []
126
130
127 for i, entry in enumerate(self._indexdata):
131 for i, entry in enumerate(self._indexdata):
128 self._indexbynode[entry[b'node']] = entry
132 self._indexbynode[entry[b'node']] = entry
129 self._indexbyrev[i] = entry
133 self._indexbyrev[i] = entry
130
134
131 self._indexbynode[nullid] = {
135 self._indexbynode[nullid] = {
132 b'node': nullid,
136 b'node': nullid,
133 b'p1': nullid,
137 b'p1': nullid,
134 b'p2': nullid,
138 b'p2': nullid,
135 b'linkrev': nullrev,
139 b'linkrev': nullrev,
136 b'flags': 0,
140 b'flags': 0,
137 }
141 }
138
142
139 self._indexbyrev[nullrev] = {
143 self._indexbyrev[nullrev] = {
140 b'node': nullid,
144 b'node': nullid,
141 b'p1': nullid,
145 b'p1': nullid,
142 b'p2': nullid,
146 b'p2': nullid,
143 b'linkrev': nullrev,
147 b'linkrev': nullrev,
144 b'flags': 0,
148 b'flags': 0,
145 }
149 }
146
150
147 for i, entry in enumerate(self._indexdata):
151 for i, entry in enumerate(self._indexdata):
148 p1rev, p2rev = self.parentrevs(self.rev(entry[b'node']))
152 p1rev, p2rev = self.parentrevs(self.rev(entry[b'node']))
149
153
150 # start, length, rawsize, chainbase, linkrev, p1, p2, node
154 # start, length, rawsize, chainbase, linkrev, p1, p2, node
151 self._index.append((0, 0, 0, -1, entry[b'linkrev'], p1rev, p2rev,
155 self._index.append((0, 0, 0, -1, entry[b'linkrev'], p1rev, p2rev,
152 entry[b'node']))
156 entry[b'node']))
153
157
154 self._index.append((0, 0, 0, -1, -1, -1, -1, nullid))
158 self._index.append((0, 0, 0, -1, -1, -1, -1, nullid))
155
159
156 def __len__(self):
160 def __len__(self):
157 return len(self._indexdata)
161 return len(self._indexdata)
158
162
159 def __iter__(self):
163 def __iter__(self):
160 return iter(range(len(self)))
164 return iter(range(len(self)))
161
165
162 def revs(self, start=0, stop=None):
166 def revs(self, start=0, stop=None):
163 step = 1
167 step = 1
164 if stop is not None:
168 if stop is not None:
165 if start > stop:
169 if start > stop:
166 step = -1
170 step = -1
167
171
168 stop += step
172 stop += step
169 else:
173 else:
170 stop = len(self)
174 stop = len(self)
171
175
172 return range(start, stop, step)
176 return range(start, stop, step)
173
177
174 def parents(self, node):
178 def parents(self, node):
175 validatenode(node)
179 validatenode(node)
176
180
177 if node not in self._indexbynode:
181 if node not in self._indexbynode:
178 raise KeyError('unknown node')
182 raise KeyError('unknown node')
179
183
180 entry = self._indexbynode[node]
184 entry = self._indexbynode[node]
181
185
182 return entry[b'p1'], entry[b'p2']
186 return entry[b'p1'], entry[b'p2']
183
187
184 def parentrevs(self, rev):
188 def parentrevs(self, rev):
185 p1, p2 = self.parents(self._indexbyrev[rev][b'node'])
189 p1, p2 = self.parents(self._indexbyrev[rev][b'node'])
186 return self.rev(p1), self.rev(p2)
190 return self.rev(p1), self.rev(p2)
187
191
188 def rev(self, node):
192 def rev(self, node):
189 validatenode(node)
193 validatenode(node)
190
194
191 try:
195 try:
192 self._indexbynode[node]
196 self._indexbynode[node]
193 except KeyError:
197 except KeyError:
194 raise error.LookupError(node, self._indexpath, _('no node'))
198 raise error.LookupError(node, self._indexpath, _('no node'))
195
199
196 for rev, entry in self._indexbyrev.items():
200 for rev, entry in self._indexbyrev.items():
197 if entry[b'node'] == node:
201 if entry[b'node'] == node:
198 return rev
202 return rev
199
203
200 raise error.ProgrammingError('this should not occur')
204 raise error.ProgrammingError('this should not occur')
201
205
202 def node(self, rev):
206 def node(self, rev):
203 validaterev(rev)
207 validaterev(rev)
204
208
205 return self._indexbyrev[rev][b'node']
209 return self._indexbyrev[rev][b'node']
206
210
207 def hasnode(self, node):
211 def hasnode(self, node):
208 validatenode(node)
212 validatenode(node)
209 return node in self._indexbynode
213 return node in self._indexbynode
210
214
211 def censorrevision(self, tr, censornode, tombstone=b''):
215 def censorrevision(self, tr, censornode, tombstone=b''):
212 raise NotImplementedError('TODO')
216 raise NotImplementedError('TODO')
213
217
214 def lookup(self, node):
218 def lookup(self, node):
215 if isinstance(node, int):
219 if isinstance(node, int):
216 return self.node(node)
220 return self.node(node)
217
221
218 if len(node) == 20:
222 if len(node) == 20:
219 self.rev(node)
223 self.rev(node)
220 return node
224 return node
221
225
222 try:
226 try:
223 rev = int(node)
227 rev = int(node)
224 if '%d' % rev != node:
228 if '%d' % rev != node:
225 raise ValueError
229 raise ValueError
226
230
227 if rev < 0:
231 if rev < 0:
228 rev = len(self) + rev
232 rev = len(self) + rev
229 if rev < 0 or rev >= len(self):
233 if rev < 0 or rev >= len(self):
230 raise ValueError
234 raise ValueError
231
235
232 return self.node(rev)
236 return self.node(rev)
233 except (ValueError, OverflowError):
237 except (ValueError, OverflowError):
234 pass
238 pass
235
239
236 if len(node) == 40:
240 if len(node) == 40:
237 try:
241 try:
238 rawnode = bin(node)
242 rawnode = bin(node)
239 self.rev(rawnode)
243 self.rev(rawnode)
240 return rawnode
244 return rawnode
241 except TypeError:
245 except TypeError:
242 pass
246 pass
243
247
244 raise error.LookupError(node, self._path, _('invalid lookup input'))
248 raise error.LookupError(node, self._path, _('invalid lookup input'))
245
249
246 def linkrev(self, rev):
250 def linkrev(self, rev):
247 validaterev(rev)
251 validaterev(rev)
248
252
249 return self._indexbyrev[rev][b'linkrev']
253 return self._indexbyrev[rev][b'linkrev']
250
254
251 def _flags(self, rev):
255 def _flags(self, rev):
252 validaterev(rev)
256 validaterev(rev)
253
257
254 return self._indexbyrev[rev][b'flags']
258 return self._indexbyrev[rev][b'flags']
255
259
256 def _candelta(self, baserev, rev):
260 def _candelta(self, baserev, rev):
257 validaterev(baserev)
261 validaterev(baserev)
258 validaterev(rev)
262 validaterev(rev)
259
263
260 if ((self._flags(baserev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)
264 if ((self._flags(baserev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)
261 or (self._flags(rev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)):
265 or (self._flags(rev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)):
262 return False
266 return False
263
267
264 return True
268 return True
265
269
266 def _processflags(self, text, flags, operation, raw=False):
267 if flags == 0:
268 return text, True
269
270 if flags & ~flagutil.REVIDX_KNOWN_FLAGS:
271 raise simplestoreerror(_("incompatible revision flag '%#x'") %
272 (flags & ~flagutil.REVIDX_KNOWN_FLAGS))
273
274 validatehash = True
275 # Depending on the operation (read or write), the order might be
276 # reversed due to non-commutative transforms.
277 orderedflags = revlog.REVIDX_FLAGS_ORDER
278 if operation == 'write':
279 orderedflags = reversed(orderedflags)
280
281 for flag in orderedflags:
282 # If a flagprocessor has been registered for a known flag, apply the
283 # related operation transform and update result tuple.
284 if flag & flags:
285 vhash = True
286
287 if flag not in revlog._flagprocessors:
288 message = _("missing processor for flag '%#x'") % (flag)
289 raise simplestoreerror(message)
290
291 processor = revlog._flagprocessors[flag]
292 if processor is not None:
293 readtransform, writetransform, rawtransform = processor
294
295 if raw:
296 vhash = rawtransform(self, text)
297 elif operation == 'read':
298 text, vhash = readtransform(self, text)
299 else: # write operation
300 text, vhash = writetransform(self, text)
301 validatehash = validatehash and vhash
302
303 return text, validatehash
304
305 def checkhash(self, text, node, p1=None, p2=None, rev=None):
270 def checkhash(self, text, node, p1=None, p2=None, rev=None):
306 if p1 is None and p2 is None:
271 if p1 is None and p2 is None:
307 p1, p2 = self.parents(node)
272 p1, p2 = self.parents(node)
308 if node != storageutil.hashrevisionsha1(text, p1, p2):
273 if node != storageutil.hashrevisionsha1(text, p1, p2):
309 raise simplestoreerror(_("integrity check failed on %s") %
274 raise simplestoreerror(_("integrity check failed on %s") %
310 self._path)
275 self._path)
311
276
312 def revision(self, nodeorrev, raw=False):
277 def revision(self, nodeorrev, raw=False):
313 if isinstance(nodeorrev, int):
278 if isinstance(nodeorrev, int):
314 node = self.node(nodeorrev)
279 node = self.node(nodeorrev)
315 else:
280 else:
316 node = nodeorrev
281 node = nodeorrev
317 validatenode(node)
282 validatenode(node)
318
283
319 if node == nullid:
284 if node == nullid:
320 return b''
285 return b''
321
286
322 rev = self.rev(node)
287 rev = self.rev(node)
323 flags = self._flags(rev)
288 flags = self._flags(rev)
324
289
325 path = b'/'.join([self._storepath, hex(node)])
290 path = b'/'.join([self._storepath, hex(node)])
326 rawtext = self._svfs.read(path)
291 rawtext = self._svfs.read(path)
327
292
328 text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
293 text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
329 if validatehash:
294 if validatehash:
330 self.checkhash(text, node, rev=rev)
295 self.checkhash(text, node, rev=rev)
331
296
332 return text
297 return text
333
298
334 def rawdata(self, nodeorrev):
299 def rawdata(self, nodeorrev):
335 return self.revision(nodeorrev, raw=True)
300 return self.revision(nodeorrev, raw=True)
336
301
337 def read(self, node):
302 def read(self, node):
338 validatenode(node)
303 validatenode(node)
339
304
340 revision = self.revision(node)
305 revision = self.revision(node)
341
306
342 if not revision.startswith(b'\1\n'):
307 if not revision.startswith(b'\1\n'):
343 return revision
308 return revision
344
309
345 start = revision.index(b'\1\n', 2)
310 start = revision.index(b'\1\n', 2)
346 return revision[start + 2:]
311 return revision[start + 2:]
347
312
348 def renamed(self, node):
313 def renamed(self, node):
349 validatenode(node)
314 validatenode(node)
350
315
351 if self.parents(node)[0] != nullid:
316 if self.parents(node)[0] != nullid:
352 return False
317 return False
353
318
354 fulltext = self.revision(node)
319 fulltext = self.revision(node)
355 m = storageutil.parsemeta(fulltext)[0]
320 m = storageutil.parsemeta(fulltext)[0]
356
321
357 if m and 'copy' in m:
322 if m and 'copy' in m:
358 return m['copy'], bin(m['copyrev'])
323 return m['copy'], bin(m['copyrev'])
359
324
360 return False
325 return False
361
326
362 def cmp(self, node, text):
327 def cmp(self, node, text):
363 validatenode(node)
328 validatenode(node)
364
329
365 t = text
330 t = text
366
331
367 if text.startswith(b'\1\n'):
332 if text.startswith(b'\1\n'):
368 t = b'\1\n\1\n' + text
333 t = b'\1\n\1\n' + text
369
334
370 p1, p2 = self.parents(node)
335 p1, p2 = self.parents(node)
371
336
372 if storageutil.hashrevisionsha1(t, p1, p2) == node:
337 if storageutil.hashrevisionsha1(t, p1, p2) == node:
373 return False
338 return False
374
339
375 if self.iscensored(self.rev(node)):
340 if self.iscensored(self.rev(node)):
376 return text != b''
341 return text != b''
377
342
378 if self.renamed(node):
343 if self.renamed(node):
379 t2 = self.read(node)
344 t2 = self.read(node)
380 return t2 != text
345 return t2 != text
381
346
382 return True
347 return True
383
348
384 def size(self, rev):
349 def size(self, rev):
385 validaterev(rev)
350 validaterev(rev)
386
351
387 node = self._indexbyrev[rev][b'node']
352 node = self._indexbyrev[rev][b'node']
388
353
389 if self.renamed(node):
354 if self.renamed(node):
390 return len(self.read(node))
355 return len(self.read(node))
391
356
392 if self.iscensored(rev):
357 if self.iscensored(rev):
393 return 0
358 return 0
394
359
395 return len(self.revision(node))
360 return len(self.revision(node))
396
361
397 def iscensored(self, rev):
362 def iscensored(self, rev):
398 validaterev(rev)
363 validaterev(rev)
399
364
400 return self._flags(rev) & repository.REVISION_FLAG_CENSORED
365 return self._flags(rev) & repository.REVISION_FLAG_CENSORED
401
366
402 def commonancestorsheads(self, a, b):
367 def commonancestorsheads(self, a, b):
403 validatenode(a)
368 validatenode(a)
404 validatenode(b)
369 validatenode(b)
405
370
406 a = self.rev(a)
371 a = self.rev(a)
407 b = self.rev(b)
372 b = self.rev(b)
408
373
409 ancestors = ancestor.commonancestorsheads(self.parentrevs, a, b)
374 ancestors = ancestor.commonancestorsheads(self.parentrevs, a, b)
410 return pycompat.maplist(self.node, ancestors)
375 return pycompat.maplist(self.node, ancestors)
411
376
412 def descendants(self, revs):
377 def descendants(self, revs):
413 # This is a copy of revlog.descendants()
378 # This is a copy of revlog.descendants()
414 first = min(revs)
379 first = min(revs)
415 if first == nullrev:
380 if first == nullrev:
416 for i in self:
381 for i in self:
417 yield i
382 yield i
418 return
383 return
419
384
420 seen = set(revs)
385 seen = set(revs)
421 for i in self.revs(start=first + 1):
386 for i in self.revs(start=first + 1):
422 for x in self.parentrevs(i):
387 for x in self.parentrevs(i):
423 if x != nullrev and x in seen:
388 if x != nullrev and x in seen:
424 seen.add(i)
389 seen.add(i)
425 yield i
390 yield i
426 break
391 break
427
392
428 # Required by verify.
393 # Required by verify.
429 def files(self):
394 def files(self):
430 entries = self._svfs.listdir(self._storepath)
395 entries = self._svfs.listdir(self._storepath)
431
396
432 # Strip out undo.backup.* files created as part of transaction
397 # Strip out undo.backup.* files created as part of transaction
433 # recording.
398 # recording.
434 entries = [f for f in entries if not f.startswith('undo.backup.')]
399 entries = [f for f in entries if not f.startswith('undo.backup.')]
435
400
436 return [b'/'.join((self._storepath, f)) for f in entries]
401 return [b'/'.join((self._storepath, f)) for f in entries]
437
402
438 def storageinfo(self, exclusivefiles=False, sharedfiles=False,
403 def storageinfo(self, exclusivefiles=False, sharedfiles=False,
439 revisionscount=False, trackedsize=False,
404 revisionscount=False, trackedsize=False,
440 storedsize=False):
405 storedsize=False):
441 # TODO do a real implementation of this
406 # TODO do a real implementation of this
442 return {
407 return {
443 'exclusivefiles': [],
408 'exclusivefiles': [],
444 'sharedfiles': [],
409 'sharedfiles': [],
445 'revisionscount': len(self),
410 'revisionscount': len(self),
446 'trackedsize': 0,
411 'trackedsize': 0,
447 'storedsize': None,
412 'storedsize': None,
448 }
413 }
449
414
450 def verifyintegrity(self, state):
415 def verifyintegrity(self, state):
451 state['skipread'] = set()
416 state['skipread'] = set()
452 for rev in self:
417 for rev in self:
453 node = self.node(rev)
418 node = self.node(rev)
454 try:
419 try:
455 self.revision(node)
420 self.revision(node)
456 except Exception as e:
421 except Exception as e:
457 yield simplefilestoreproblem(
422 yield simplefilestoreproblem(
458 error='unpacking %s: %s' % (node, e),
423 error='unpacking %s: %s' % (node, e),
459 node=node)
424 node=node)
460 state['skipread'].add(node)
425 state['skipread'].add(node)
461
426
462 def emitrevisions(self, nodes, nodesorder=None, revisiondata=False,
427 def emitrevisions(self, nodes, nodesorder=None, revisiondata=False,
463 assumehaveparentrevisions=False,
428 assumehaveparentrevisions=False,
464 deltamode=repository.CG_DELTAMODE_STD):
429 deltamode=repository.CG_DELTAMODE_STD):
465 # TODO this will probably break on some ordering options.
430 # TODO this will probably break on some ordering options.
466 nodes = [n for n in nodes if n != nullid]
431 nodes = [n for n in nodes if n != nullid]
467 if not nodes:
432 if not nodes:
468 return
433 return
469 for delta in storageutil.emitrevisions(
434 for delta in storageutil.emitrevisions(
470 self, nodes, nodesorder, simplestorerevisiondelta,
435 self, nodes, nodesorder, simplestorerevisiondelta,
471 revisiondata=revisiondata,
436 revisiondata=revisiondata,
472 assumehaveparentrevisions=assumehaveparentrevisions,
437 assumehaveparentrevisions=assumehaveparentrevisions,
473 deltamode=deltamode):
438 deltamode=deltamode):
474 yield delta
439 yield delta
475
440
476 def add(self, text, meta, transaction, linkrev, p1, p2):
441 def add(self, text, meta, transaction, linkrev, p1, p2):
477 if meta or text.startswith(b'\1\n'):
442 if meta or text.startswith(b'\1\n'):
478 text = storageutil.packmeta(meta, text)
443 text = storageutil.packmeta(meta, text)
479
444
480 return self.addrevision(text, transaction, linkrev, p1, p2)
445 return self.addrevision(text, transaction, linkrev, p1, p2)
481
446
482 def addrevision(self, text, transaction, linkrev, p1, p2, node=None,
447 def addrevision(self, text, transaction, linkrev, p1, p2, node=None,
483 flags=revlog.REVIDX_DEFAULT_FLAGS, cachedelta=None):
448 flags=revlog.REVIDX_DEFAULT_FLAGS, cachedelta=None):
484 validatenode(p1)
449 validatenode(p1)
485 validatenode(p2)
450 validatenode(p2)
486
451
487 if flags:
452 if flags:
488 node = node or storageutil.hashrevisionsha1(text, p1, p2)
453 node = node or storageutil.hashrevisionsha1(text, p1, p2)
489
454
490 rawtext, validatehash = self._processflags(text, flags, 'write')
455 rawtext, validatehash = self._processflags(text, flags, 'write')
491
456
492 node = node or storageutil.hashrevisionsha1(text, p1, p2)
457 node = node or storageutil.hashrevisionsha1(text, p1, p2)
493
458
494 if node in self._indexbynode:
459 if node in self._indexbynode:
495 return node
460 return node
496
461
497 if validatehash:
462 if validatehash:
498 self.checkhash(rawtext, node, p1=p1, p2=p2)
463 self.checkhash(rawtext, node, p1=p1, p2=p2)
499
464
500 return self._addrawrevision(node, rawtext, transaction, linkrev, p1, p2,
465 return self._addrawrevision(node, rawtext, transaction, linkrev, p1, p2,
501 flags)
466 flags)
502
467
503 def _addrawrevision(self, node, rawtext, transaction, link, p1, p2, flags):
468 def _addrawrevision(self, node, rawtext, transaction, link, p1, p2, flags):
504 transaction.addbackup(self._indexpath)
469 transaction.addbackup(self._indexpath)
505
470
506 path = b'/'.join([self._storepath, hex(node)])
471 path = b'/'.join([self._storepath, hex(node)])
507
472
508 self._svfs.write(path, rawtext)
473 self._svfs.write(path, rawtext)
509
474
510 self._indexdata.append({
475 self._indexdata.append({
511 b'node': node,
476 b'node': node,
512 b'p1': p1,
477 b'p1': p1,
513 b'p2': p2,
478 b'p2': p2,
514 b'linkrev': link,
479 b'linkrev': link,
515 b'flags': flags,
480 b'flags': flags,
516 })
481 })
517
482
518 self._reflectindexupdate()
483 self._reflectindexupdate()
519
484
520 return node
485 return node
521
486
522 def _reflectindexupdate(self):
487 def _reflectindexupdate(self):
523 self._refreshindex()
488 self._refreshindex()
524 self._svfs.write(self._indexpath,
489 self._svfs.write(self._indexpath,
525 ''.join(cborutil.streamencode(self._indexdata)))
490 ''.join(cborutil.streamencode(self._indexdata)))
526
491
527 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None,
492 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None,
528 maybemissingparents=False):
493 maybemissingparents=False):
529 if maybemissingparents:
494 if maybemissingparents:
530 raise error.Abort(_('simple store does not support missing parents '
495 raise error.Abort(_('simple store does not support missing parents '
531 'write mode'))
496 'write mode'))
532
497
533 nodes = []
498 nodes = []
534
499
535 transaction.addbackup(self._indexpath)
500 transaction.addbackup(self._indexpath)
536
501
537 for node, p1, p2, linknode, deltabase, delta, flags in deltas:
502 for node, p1, p2, linknode, deltabase, delta, flags in deltas:
538 linkrev = linkmapper(linknode)
503 linkrev = linkmapper(linknode)
539 flags = flags or revlog.REVIDX_DEFAULT_FLAGS
504 flags = flags or revlog.REVIDX_DEFAULT_FLAGS
540
505
541 nodes.append(node)
506 nodes.append(node)
542
507
543 if node in self._indexbynode:
508 if node in self._indexbynode:
544 continue
509 continue
545
510
546 # Need to resolve the fulltext from the delta base.
511 # Need to resolve the fulltext from the delta base.
547 if deltabase == nullid:
512 if deltabase == nullid:
548 text = mdiff.patch(b'', delta)
513 text = mdiff.patch(b'', delta)
549 else:
514 else:
550 text = mdiff.patch(self.revision(deltabase), delta)
515 text = mdiff.patch(self.revision(deltabase), delta)
551
516
552 self._addrawrevision(node, text, transaction, linkrev, p1, p2,
517 self._addrawrevision(node, text, transaction, linkrev, p1, p2,
553 flags)
518 flags)
554
519
555 if addrevisioncb:
520 if addrevisioncb:
556 addrevisioncb(self, node)
521 addrevisioncb(self, node)
557 return nodes
522 return nodes
558
523
559 def _headrevs(self):
524 def _headrevs(self):
560 # Assume all revisions are heads by default.
525 # Assume all revisions are heads by default.
561 revishead = {rev: True for rev in self._indexbyrev}
526 revishead = {rev: True for rev in self._indexbyrev}
562
527
563 for rev, entry in self._indexbyrev.items():
528 for rev, entry in self._indexbyrev.items():
564 # Unset head flag for all seen parents.
529 # Unset head flag for all seen parents.
565 revishead[self.rev(entry[b'p1'])] = False
530 revishead[self.rev(entry[b'p1'])] = False
566 revishead[self.rev(entry[b'p2'])] = False
531 revishead[self.rev(entry[b'p2'])] = False
567
532
568 return [rev for rev, ishead in sorted(revishead.items())
533 return [rev for rev, ishead in sorted(revishead.items())
569 if ishead]
534 if ishead]
570
535
571 def heads(self, start=None, stop=None):
536 def heads(self, start=None, stop=None):
572 # This is copied from revlog.py.
537 # This is copied from revlog.py.
573 if start is None and stop is None:
538 if start is None and stop is None:
574 if not len(self):
539 if not len(self):
575 return [nullid]
540 return [nullid]
576 return [self.node(r) for r in self._headrevs()]
541 return [self.node(r) for r in self._headrevs()]
577
542
578 if start is None:
543 if start is None:
579 start = nullid
544 start = nullid
580 if stop is None:
545 if stop is None:
581 stop = []
546 stop = []
582 stoprevs = set([self.rev(n) for n in stop])
547 stoprevs = set([self.rev(n) for n in stop])
583 startrev = self.rev(start)
548 startrev = self.rev(start)
584 reachable = {startrev}
549 reachable = {startrev}
585 heads = {startrev}
550 heads = {startrev}
586
551
587 parentrevs = self.parentrevs
552 parentrevs = self.parentrevs
588 for r in self.revs(start=startrev + 1):
553 for r in self.revs(start=startrev + 1):
589 for p in parentrevs(r):
554 for p in parentrevs(r):
590 if p in reachable:
555 if p in reachable:
591 if r not in stoprevs:
556 if r not in stoprevs:
592 reachable.add(r)
557 reachable.add(r)
593 heads.add(r)
558 heads.add(r)
594 if p in heads and p not in stoprevs:
559 if p in heads and p not in stoprevs:
595 heads.remove(p)
560 heads.remove(p)
596
561
597 return [self.node(r) for r in heads]
562 return [self.node(r) for r in heads]
598
563
599 def children(self, node):
564 def children(self, node):
600 validatenode(node)
565 validatenode(node)
601
566
602 # This is a copy of revlog.children().
567 # This is a copy of revlog.children().
603 c = []
568 c = []
604 p = self.rev(node)
569 p = self.rev(node)
605 for r in self.revs(start=p + 1):
570 for r in self.revs(start=p + 1):
606 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
571 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
607 if prevs:
572 if prevs:
608 for pr in prevs:
573 for pr in prevs:
609 if pr == p:
574 if pr == p:
610 c.append(self.node(r))
575 c.append(self.node(r))
611 elif p == nullrev:
576 elif p == nullrev:
612 c.append(self.node(r))
577 c.append(self.node(r))
613 return c
578 return c
614
579
615 def getstrippoint(self, minlink):
580 def getstrippoint(self, minlink):
616 return storageutil.resolvestripinfo(
581 return storageutil.resolvestripinfo(
617 minlink, len(self) - 1, self._headrevs(), self.linkrev,
582 minlink, len(self) - 1, self._headrevs(), self.linkrev,
618 self.parentrevs)
583 self.parentrevs)
619
584
620 def strip(self, minlink, transaction):
585 def strip(self, minlink, transaction):
621 if not len(self):
586 if not len(self):
622 return
587 return
623
588
624 rev, _ignored = self.getstrippoint(minlink)
589 rev, _ignored = self.getstrippoint(minlink)
625 if rev == len(self):
590 if rev == len(self):
626 return
591 return
627
592
628 # Purge index data starting at the requested revision.
593 # Purge index data starting at the requested revision.
629 self._indexdata[rev:] = []
594 self._indexdata[rev:] = []
630 self._reflectindexupdate()
595 self._reflectindexupdate()
631
596
632 def issimplestorefile(f, kind, st):
597 def issimplestorefile(f, kind, st):
633 if kind != stat.S_IFREG:
598 if kind != stat.S_IFREG:
634 return False
599 return False
635
600
636 if store.isrevlog(f, kind, st):
601 if store.isrevlog(f, kind, st):
637 return False
602 return False
638
603
639 # Ignore transaction undo files.
604 # Ignore transaction undo files.
640 if f.startswith('undo.'):
605 if f.startswith('undo.'):
641 return False
606 return False
642
607
643 # Otherwise assume it belongs to the simple store.
608 # Otherwise assume it belongs to the simple store.
644 return True
609 return True
645
610
646 class simplestore(store.encodedstore):
611 class simplestore(store.encodedstore):
647 def datafiles(self):
612 def datafiles(self):
648 for x in super(simplestore, self).datafiles():
613 for x in super(simplestore, self).datafiles():
649 yield x
614 yield x
650
615
651 # Supplement with non-revlog files.
616 # Supplement with non-revlog files.
652 extrafiles = self._walk('data', True, filefilter=issimplestorefile)
617 extrafiles = self._walk('data', True, filefilter=issimplestorefile)
653
618
654 for unencoded, encoded, size in extrafiles:
619 for unencoded, encoded, size in extrafiles:
655 try:
620 try:
656 unencoded = store.decodefilename(unencoded)
621 unencoded = store.decodefilename(unencoded)
657 except KeyError:
622 except KeyError:
658 unencoded = None
623 unencoded = None
659
624
660 yield unencoded, encoded, size
625 yield unencoded, encoded, size
661
626
662 def reposetup(ui, repo):
627 def reposetup(ui, repo):
663 if not repo.local():
628 if not repo.local():
664 return
629 return
665
630
666 if isinstance(repo, bundlerepo.bundlerepository):
631 if isinstance(repo, bundlerepo.bundlerepository):
667 raise error.Abort(_('cannot use simple store with bundlerepo'))
632 raise error.Abort(_('cannot use simple store with bundlerepo'))
668
633
669 class simplestorerepo(repo.__class__):
634 class simplestorerepo(repo.__class__):
670 def file(self, f):
635 def file(self, f):
671 return filestorage(self.svfs, f)
636 return filestorage(self.svfs, f)
672
637
673 repo.__class__ = simplestorerepo
638 repo.__class__ = simplestorerepo
674
639
675 def featuresetup(ui, supported):
640 def featuresetup(ui, supported):
676 supported.add(REQUIREMENT)
641 supported.add(REQUIREMENT)
677
642
678 def newreporequirements(orig, ui, createopts):
643 def newreporequirements(orig, ui, createopts):
679 """Modifies default requirements for new repos to use the simple store."""
644 """Modifies default requirements for new repos to use the simple store."""
680 requirements = orig(ui, createopts)
645 requirements = orig(ui, createopts)
681
646
682 # These requirements are only used to affect creation of the store
647 # These requirements are only used to affect creation of the store
683 # object. We have our own store. So we can remove them.
648 # object. We have our own store. So we can remove them.
684 # TODO do this once we feel like taking the test hit.
649 # TODO do this once we feel like taking the test hit.
685 #if 'fncache' in requirements:
650 #if 'fncache' in requirements:
686 # requirements.remove('fncache')
651 # requirements.remove('fncache')
687 #if 'dotencode' in requirements:
652 #if 'dotencode' in requirements:
688 # requirements.remove('dotencode')
653 # requirements.remove('dotencode')
689
654
690 requirements.add(REQUIREMENT)
655 requirements.add(REQUIREMENT)
691
656
692 return requirements
657 return requirements
693
658
694 def makestore(orig, requirements, path, vfstype):
659 def makestore(orig, requirements, path, vfstype):
695 if REQUIREMENT not in requirements:
660 if REQUIREMENT not in requirements:
696 return orig(requirements, path, vfstype)
661 return orig(requirements, path, vfstype)
697
662
698 return simplestore(path, vfstype)
663 return simplestore(path, vfstype)
699
664
700 def verifierinit(orig, self, *args, **kwargs):
665 def verifierinit(orig, self, *args, **kwargs):
701 orig(self, *args, **kwargs)
666 orig(self, *args, **kwargs)
702
667
703 # We don't care that files in the store don't align with what is
668 # We don't care that files in the store don't align with what is
704 # advertised. So suppress these warnings.
669 # advertised. So suppress these warnings.
705 self.warnorphanstorefiles = False
670 self.warnorphanstorefiles = False
706
671
707 def extsetup(ui):
672 def extsetup(ui):
708 localrepo.featuresetupfuncs.add(featuresetup)
673 localrepo.featuresetupfuncs.add(featuresetup)
709
674
710 extensions.wrapfunction(localrepo, 'newreporequirements',
675 extensions.wrapfunction(localrepo, 'newreporequirements',
711 newreporequirements)
676 newreporequirements)
712 extensions.wrapfunction(localrepo, 'makestore', makestore)
677 extensions.wrapfunction(localrepo, 'makestore', makestore)
713 extensions.wrapfunction(verify.verifier, '__init__', verifierinit)
678 extensions.wrapfunction(verify.verifier, '__init__', verifierinit)
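
The _processflags method removed above documents the flag-processor contract that flagutil.flagprocessorsmixin now supplies to filestorage: each registered flag maps to a (readtransform, writetransform, rawtransform) tuple, read transforms run in flag order while write transforms run in reversed order, and every transform reports whether the resulting text can still be hash-checked. The snippet below is a minimal, self-contained sketch of that contract; it is not part of the changeset, REVIDX_DEMO_FLAG and the base64 transforms are invented for illustration, and the dispatch is simplified to a single unordered pass.

import base64

REVIDX_DEMO_FLAG = 1 << 4  # hypothetical flag bit, not a real Mercurial flag

def demoread(store, rawtext):
    # read transform: stored raw text -> fulltext, plus "validate the hash" flag
    return base64.b64decode(rawtext), True

def demowrite(store, text):
    # write transform: fulltext -> raw text as stored on disk
    return base64.b64encode(text), False

def demoraw(store, rawtext):
    # raw transform: may the stored raw text be hash-checked without transforming?
    return False

# Processors are (read, write, raw) tuples keyed by flag bit; filestorage seeds
# its own copy of such a mapping from flagutil.flagprocessors in __init__ above.
flagprocessors = {REVIDX_DEMO_FLAG: (demoread, demowrite, demoraw)}

def processflagsread(store, text, flags):
    # Simplified analogue of the read branch of the removed _processflags().
    validatehash = True
    for flag, (read, write, raw) in flagprocessors.items():
        if flag & flags:
            text, vhash = read(store, text)
            validatehash = validatehash and vhash
    return text, validatehash

# Round trip: roughly what addrevision() and revision() do around the transforms.
rawtext, _ = demowrite(None, b'hello')
assert processflagsread(None, rawtext, REVIDX_DEMO_FLAG) == (b'hello', True)

Under that model, the added _flagserrorclass = simplestoreerror attribute presumably tells the mixin to raise the same exception type the removed code raised directly for unknown flags and missing processors.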