wrapfunction: use sysstr instead of bytes as argument in "sqlitestore"...
marmoute
r51686:1fbae268 default
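The change itself sits at the very end of the file: the attribute-name argument passed to extensions.wrapfunction() switches from a bytes literal to a native string (sysstr). A minimal before/after sketch of the call shape:

    extensions.wrapfunction(localrepo, b'makemain', makemain)   # before: bytes name
    extensions.wrapfunction(localrepo, 'makemain', makemain)    # after: native str name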
@@ -1,1344 +1,1344 @@
1 1 # sqlitestore.py - Storage backend that uses SQLite
2 2 #
3 3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """store repository data in SQLite (EXPERIMENTAL)
9 9
10 10 The sqlitestore extension enables the storage of repository data in SQLite.
11 11
12 12 This extension is HIGHLY EXPERIMENTAL. There are NO BACKWARDS COMPATIBILITY
13 13 GUARANTEES. This means that repositories created with this extension may
14 14 only be usable with the exact version of this extension/Mercurial that was
15 15 used. The extension attempts to enforce this in order to prevent repository
16 16 corruption.
17 17
18 18 In addition, several features are not yet supported or have known bugs:
19 19
20 20 * Only some data is stored in SQLite. Changeset, manifest, and other repository
21 21 data is not yet stored in SQLite.
22 22 * Transactions are not robust. If the process is aborted at the right time
23 23 during transaction close/rollback, the repository could be in an inconsistent
24 24 state. This problem will diminish once all repository data is tracked by
25 25 SQLite.
26 26 * Bundle repositories do not work (the ability to use e.g.
27 27 `hg -R <bundle-file> log` to automatically overlay a bundle on top of the
28 28 existing repository).
29 29 * Various other features don't work.
30 30
31 31 This extension should work for basic clone/pull, update, and commit workflows.
32 32 Some history rewriting operations may fail due to lack of support for bundle
33 33 repositories.
34 34
35 35 To use, activate the extension and set the ``storage.new-repo-backend`` config
36 36 option to ``sqlite`` to enable new repositories to use SQLite for storage.
37 37 """
38 38
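# A minimal hgrc sketch for opting newly created repositories into this
# backend (illustrative only):
#
#   [extensions]
#   sqlitestore =
#
#   [storage]
#   new-repo-backend = sqlite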
39 39 # To run the test suite with repos using SQLite by default, execute the
40 40 # following:
41 41 #
42 42 # HGREPOFEATURES="sqlitestore" run-tests.py \
43 43 # --extra-config-opt extensions.sqlitestore= \
44 44 # --extra-config-opt storage.new-repo-backend=sqlite
45 45
46 46
47 47 import sqlite3
48 48 import struct
49 49 import threading
50 50 import zlib
51 51
52 52 from mercurial.i18n import _
53 53 from mercurial.node import (
54 54 nullrev,
55 55 sha1nodeconstants,
56 56 short,
57 57 )
58 58 from mercurial.thirdparty import attr
59 59 from mercurial import (
60 60 ancestor,
61 61 dagop,
62 62 encoding,
63 63 error,
64 64 extensions,
65 65 localrepo,
66 66 mdiff,
67 67 pycompat,
68 68 registrar,
69 69 requirements,
70 70 util,
71 71 verify,
72 72 )
73 73 from mercurial.interfaces import (
74 74 repository,
75 75 util as interfaceutil,
76 76 )
77 77 from mercurial.utils import (
78 78 hashutil,
79 79 storageutil,
80 80 )
81 81
82 82 try:
83 83 from mercurial import zstd # pytype: disable=import-error
84 84
85 85 zstd.__version__
86 86 except ImportError:
87 87 zstd = None
88 88
89 89 configtable = {}
90 90 configitem = registrar.configitem(configtable)
91 91
92 92 # experimental config: storage.sqlite.compression
93 93 configitem(
94 94 b'storage',
95 95 b'sqlite.compression',
96 96 default=b'zstd' if zstd else b'zlib',
97 97 experimental=True,
98 98 )
99 99
100 100 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
101 101 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
102 102 # be specifying the version(s) of Mercurial they are tested with, or
103 103 # leave the attribute unspecified.
104 104 testedwith = b'ships-with-hg-core'
105 105
106 106 REQUIREMENT = b'exp-sqlite-001'
107 107 REQUIREMENT_ZSTD = b'exp-sqlite-comp-001=zstd'
108 108 REQUIREMENT_ZLIB = b'exp-sqlite-comp-001=zlib'
109 109 REQUIREMENT_NONE = b'exp-sqlite-comp-001=none'
110 110 REQUIREMENT_SHALLOW_FILES = b'exp-sqlite-shallow-files'
111 111
112 112 CURRENT_SCHEMA_VERSION = 1
113 113
114 114 COMPRESSION_NONE = 1
115 115 COMPRESSION_ZSTD = 2
116 116 COMPRESSION_ZLIB = 3
117 117
118 118 FLAG_CENSORED = 1
119 119 FLAG_MISSING_P1 = 2
120 120 FLAG_MISSING_P2 = 4
121 121
122 122 CREATE_SCHEMA = [
123 123 # Deltas are stored as content-indexed blobs.
124 124 # compression column holds COMPRESSION_* constant for how the
125 125 # delta is encoded.
126 126 'CREATE TABLE delta ('
127 127 ' id INTEGER PRIMARY KEY, '
128 128 ' compression INTEGER NOT NULL, '
129 129 ' hash BLOB UNIQUE ON CONFLICT ABORT, '
130 130 ' delta BLOB NOT NULL '
131 131 ')',
132 132 # Tracked paths are denormalized to integers to avoid redundant
133 133 # storage of the path name.
134 134 'CREATE TABLE filepath ('
135 135 ' id INTEGER PRIMARY KEY, '
136 136 ' path BLOB NOT NULL '
137 137 ')',
138 138 'CREATE UNIQUE INDEX filepath_path ON filepath (path)',
139 139 # We have a single table for all file revision data.
140 140 # Each file revision is uniquely described by a (path, rev) and
141 141 # (path, node).
142 142 #
143 143 # Revision data is stored as a pointer to the delta producing this
144 144 # revision and the file revision whose delta should be applied before
145 145 # that one. One can reconstruct the delta chain by recursively following
146 146 # the delta base revision pointers until one encounters NULL.
147 147 #
148 148 # flags column holds bitwise integer flags controlling storage options.
149 149 # These flags are defined by the FLAG_* constants.
150 150 'CREATE TABLE fileindex ('
151 151 ' id INTEGER PRIMARY KEY, '
152 152 ' pathid INTEGER REFERENCES filepath(id), '
153 153 ' revnum INTEGER NOT NULL, '
154 154 ' p1rev INTEGER NOT NULL, '
155 155 ' p2rev INTEGER NOT NULL, '
156 156 ' linkrev INTEGER NOT NULL, '
157 157 ' flags INTEGER NOT NULL, '
158 158 ' deltaid INTEGER REFERENCES delta(id), '
159 159 ' deltabaseid INTEGER REFERENCES fileindex(id), '
160 160 ' node BLOB NOT NULL '
161 161 ')',
162 162 'CREATE UNIQUE INDEX fileindex_pathrevnum '
163 163 ' ON fileindex (pathid, revnum)',
164 164 'CREATE UNIQUE INDEX fileindex_pathnode ON fileindex (pathid, node)',
165 165 # Provide a view over all file data for convenience.
166 166 'CREATE VIEW filedata AS '
167 167 'SELECT '
168 168 ' fileindex.id AS id, '
169 169 ' filepath.id AS pathid, '
170 170 ' filepath.path AS path, '
171 171 ' fileindex.revnum AS revnum, '
172 172 ' fileindex.node AS node, '
173 173 ' fileindex.p1rev AS p1rev, '
174 174 ' fileindex.p2rev AS p2rev, '
175 175 ' fileindex.linkrev AS linkrev, '
176 176 ' fileindex.flags AS flags, '
177 177 ' fileindex.deltaid AS deltaid, '
178 178 ' fileindex.deltabaseid AS deltabaseid '
179 179 'FROM filepath, fileindex '
180 180 'WHERE fileindex.pathid=filepath.id',
181 181 'PRAGMA user_version=%d' % CURRENT_SCHEMA_VERSION,
182 182 ]
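# An illustrative sketch (hypothetical row values, not part of the schema): a
# file with three revisions, each stored as a delta against its predecessor,
# would roughly produce
#
#   fileindex: (revnum=0, deltaid=1, deltabaseid=NULL)
#              (revnum=1, deltaid=2, deltabaseid=<id of the revnum 0 row>)
#              (revnum=2, deltaid=3, deltabaseid=<id of the revnum 1 row>)
#
# Reconstructing revnum 2 walks deltabaseid back to NULL, takes the root
# entry's blob as the base text, and applies the remaining deltas oldest
# first (see resolvedeltachain() below).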
183 183
184 184
185 185 def resolvedeltachain(db, pathid, node, revisioncache, stoprids, zstddctx=None):
186 186 """Resolve a delta chain for a file node."""
187 187
188 188 # TODO the "not in ({stops})" here is possibly slowing down the query
189 189 # because it needs to perform the lookup on every recursive invocation.
190 190 # This could possibly be faster if we created a temporary query with
191 191 # baseid "poisoned" to null and limited the recursive filter to
192 192 # "is not null".
193 193 res = db.execute(
194 194 'WITH RECURSIVE '
195 195 ' deltachain(deltaid, baseid) AS ('
196 196 ' SELECT deltaid, deltabaseid FROM fileindex '
197 197 ' WHERE pathid=? AND node=? '
198 198 ' UNION ALL '
199 199 ' SELECT fileindex.deltaid, deltabaseid '
200 200 ' FROM fileindex, deltachain '
201 201 ' WHERE '
202 202 ' fileindex.id=deltachain.baseid '
203 203 ' AND deltachain.baseid IS NOT NULL '
204 204 ' AND fileindex.id NOT IN ({stops}) '
205 205 ' ) '
206 206 'SELECT deltachain.baseid, compression, delta '
207 207 'FROM deltachain, delta '
208 208 'WHERE delta.id=deltachain.deltaid'.format(
209 209 stops=','.join(['?'] * len(stoprids))
210 210 ),
211 211 tuple([pathid, node] + list(stoprids.keys())),
212 212 )
213 213
214 214 deltas = []
215 215 lastdeltabaseid = None
216 216
217 217 for deltabaseid, compression, delta in res:
218 218 lastdeltabaseid = deltabaseid
219 219
220 220 if compression == COMPRESSION_ZSTD:
221 221 delta = zstddctx.decompress(delta)
222 222 elif compression == COMPRESSION_NONE:
223 223 delta = delta
224 224 elif compression == COMPRESSION_ZLIB:
225 225 delta = zlib.decompress(delta)
226 226 else:
227 227 raise SQLiteStoreError(
228 228 b'unhandled compression type: %d' % compression
229 229 )
230 230
231 231 deltas.append(delta)
232 232
233 233 if lastdeltabaseid in stoprids:
234 234 basetext = revisioncache[stoprids[lastdeltabaseid]]
235 235 else:
236 236 basetext = deltas.pop()
237 237
238 238 deltas.reverse()
239 239 fulltext = mdiff.patches(basetext, deltas)
240 240
241 241 # SQLite returns buffer instances for blob columns on Python 2. This
242 242 # type can propagate through the delta application layer. Because
243 243 # downstream callers assume revisions are bytes, cast as needed.
244 244 if not isinstance(fulltext, bytes):
245 245 fulltext = bytes(fulltext)
246 246
247 247 return fulltext
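# A hedged usage sketch (variable names illustrative): callers pass a mapping
# of already-cached revisions so the chain walk can stop early, e.g.
#
#   stoprids = {revisions[n].rid: n for n in revisioncache}
#   fulltext = resolvedeltachain(db, pathid, node, revisioncache,
#                                stoprids or {-1: None}, zstddctx=dctx)
#
# The {-1: None} sentinel (used when the cache is empty) matches no fileindex
# row id, so the NOT IN filter above excludes nothing and the full chain is
# resolved from its root.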
248 248
249 249
250 250 def insertdelta(db, compression, hash, delta):
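# Note: the hash column is declared UNIQUE (see CREATE_SCHEMA above), so
# inserting an already-stored delta raises sqlite3.IntegrityError and we fall
# back to returning the id of the existing row, giving content-based
# de-duplication without a lookup-before-insert.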
251 251 try:
252 252 return db.execute(
253 253 'INSERT INTO delta (compression, hash, delta) VALUES (?, ?, ?)',
254 254 (compression, hash, delta),
255 255 ).lastrowid
256 256 except sqlite3.IntegrityError:
257 257 return db.execute(
258 258 'SELECT id FROM delta WHERE hash=?', (hash,)
259 259 ).fetchone()[0]
260 260
261 261
262 262 class SQLiteStoreError(error.StorageError):
263 263 pass
264 264
265 265
266 266 @attr.s
267 267 class revisionentry:
268 268 rid = attr.ib()
269 269 rev = attr.ib()
270 270 node = attr.ib()
271 271 p1rev = attr.ib()
272 272 p2rev = attr.ib()
273 273 p1node = attr.ib()
274 274 p2node = attr.ib()
275 275 linkrev = attr.ib()
276 276 flags = attr.ib()
277 277
278 278
279 279 @interfaceutil.implementer(repository.irevisiondelta)
280 280 @attr.s(slots=True)
281 281 class sqliterevisiondelta:
282 282 node = attr.ib()
283 283 p1node = attr.ib()
284 284 p2node = attr.ib()
285 285 basenode = attr.ib()
286 286 flags = attr.ib()
287 287 baserevisionsize = attr.ib()
288 288 revision = attr.ib()
289 289 delta = attr.ib()
290 290 sidedata = attr.ib()
291 291 protocol_flags = attr.ib()
292 292 linknode = attr.ib(default=None)
293 293
294 294
295 295 @interfaceutil.implementer(repository.iverifyproblem)
296 296 @attr.s(frozen=True)
297 297 class sqliteproblem:
298 298 warning = attr.ib(default=None)
299 299 error = attr.ib(default=None)
300 300 node = attr.ib(default=None)
301 301
302 302
303 303 @interfaceutil.implementer(repository.ifilestorage)
304 304 class sqlitefilestore:
305 305 """Implements storage for an individual tracked path."""
306 306
307 307 def __init__(self, db, path, compression):
308 308 self.nullid = sha1nodeconstants.nullid
309 309 self._db = db
310 310 self._path = path
311 311
312 312 self._pathid = None
313 313
314 314 # revnum -> node
315 315 self._revtonode = {}
316 316 # node -> revnum
317 317 self._nodetorev = {}
318 318 # node -> data structure
319 319 self._revisions = {}
320 320
321 321 self._revisioncache = util.lrucachedict(10)
322 322
323 323 self._compengine = compression
324 324
325 325 if compression == b'zstd':
326 326 self._cctx = zstd.ZstdCompressor(level=3)
327 327 self._dctx = zstd.ZstdDecompressor()
328 328 else:
329 329 self._cctx = None
330 330 self._dctx = None
331 331
332 332 self._refreshindex()
333 333
334 334 def _refreshindex(self):
335 335 self._revtonode = {}
336 336 self._nodetorev = {}
337 337 self._revisions = {}
338 338
339 339 res = list(
340 340 self._db.execute(
341 341 'SELECT id FROM filepath WHERE path=?', (self._path,)
342 342 )
343 343 )
344 344
345 345 if not res:
346 346 self._pathid = None
347 347 return
348 348
349 349 self._pathid = res[0][0]
350 350
351 351 res = self._db.execute(
352 352 'SELECT id, revnum, node, p1rev, p2rev, linkrev, flags '
353 353 'FROM fileindex '
354 354 'WHERE pathid=? '
355 355 'ORDER BY revnum ASC',
356 356 (self._pathid,),
357 357 )
358 358
359 359 for i, row in enumerate(res):
360 360 rid, rev, node, p1rev, p2rev, linkrev, flags = row
361 361
362 362 if i != rev:
363 363 raise SQLiteStoreError(
364 364 _(b'sqlite database has inconsistent revision numbers')
365 365 )
366 366
367 367 if p1rev == nullrev:
368 368 p1node = sha1nodeconstants.nullid
369 369 else:
370 370 p1node = self._revtonode[p1rev]
371 371
372 372 if p2rev == nullrev:
373 373 p2node = sha1nodeconstants.nullid
374 374 else:
375 375 p2node = self._revtonode[p2rev]
376 376
377 377 entry = revisionentry(
378 378 rid=rid,
379 379 rev=rev,
380 380 node=node,
381 381 p1rev=p1rev,
382 382 p2rev=p2rev,
383 383 p1node=p1node,
384 384 p2node=p2node,
385 385 linkrev=linkrev,
386 386 flags=flags,
387 387 )
388 388
389 389 self._revtonode[rev] = node
390 390 self._nodetorev[node] = rev
391 391 self._revisions[node] = entry
392 392
393 393 # Start of ifileindex interface.
394 394
395 395 def __len__(self):
396 396 return len(self._revisions)
397 397
398 398 def __iter__(self):
399 399 return iter(range(len(self._revisions)))
400 400
401 401 def hasnode(self, node):
402 402 if node == sha1nodeconstants.nullid:
403 403 return False
404 404
405 405 return node in self._nodetorev
406 406
407 407 def revs(self, start=0, stop=None):
408 408 return storageutil.iterrevs(
409 409 len(self._revisions), start=start, stop=stop
410 410 )
411 411
412 412 def parents(self, node):
413 413 if node == sha1nodeconstants.nullid:
414 414 return sha1nodeconstants.nullid, sha1nodeconstants.nullid
415 415
416 416 if node not in self._revisions:
417 417 raise error.LookupError(node, self._path, _(b'no node'))
418 418
419 419 entry = self._revisions[node]
420 420 return entry.p1node, entry.p2node
421 421
422 422 def parentrevs(self, rev):
423 423 if rev == nullrev:
424 424 return nullrev, nullrev
425 425
426 426 if rev not in self._revtonode:
427 427 raise IndexError(rev)
428 428
429 429 entry = self._revisions[self._revtonode[rev]]
430 430 return entry.p1rev, entry.p2rev
431 431
432 432 def ancestors(self, revs, stoprev=0, inclusive=False):
433 433 """Generate the ancestors of 'revs' in reverse revision order.
434 434 Does not generate revs lower than stoprev.
435 435
436 436 See the documentation for ancestor.lazyancestors for more details."""
437 437
438 438 # first, make sure start revisions aren't filtered
439 439 revs = list(revs)
440 440 checkrev = self.node
441 441 for r in revs:
442 442 checkrev(r)
443 443
444 444 return ancestor.lazyancestors(
445 445 self.parentrevs,
446 446 revs,
447 447 stoprev=stoprev,
448 448 inclusive=inclusive,
449 449 )
450 450
451 451 def rev(self, node):
452 452 if node == sha1nodeconstants.nullid:
453 453 return nullrev
454 454
455 455 if node not in self._nodetorev:
456 456 raise error.LookupError(node, self._path, _(b'no node'))
457 457
458 458 return self._nodetorev[node]
459 459
460 460 def node(self, rev):
461 461 if rev == nullrev:
462 462 return sha1nodeconstants.nullid
463 463
464 464 if rev not in self._revtonode:
465 465 raise IndexError(rev)
466 466
467 467 return self._revtonode[rev]
468 468
469 469 def lookup(self, node):
470 470 return storageutil.fileidlookup(self, node, self._path)
471 471
472 472 def linkrev(self, rev):
473 473 if rev == nullrev:
474 474 return nullrev
475 475
476 476 if rev not in self._revtonode:
477 477 raise IndexError(rev)
478 478
479 479 entry = self._revisions[self._revtonode[rev]]
480 480 return entry.linkrev
481 481
482 482 def iscensored(self, rev):
483 483 if rev == nullrev:
484 484 return False
485 485
486 486 if rev not in self._revtonode:
487 487 raise IndexError(rev)
488 488
489 489 return self._revisions[self._revtonode[rev]].flags & FLAG_CENSORED
490 490
491 491 def commonancestorsheads(self, node1, node2):
492 492 rev1 = self.rev(node1)
493 493 rev2 = self.rev(node2)
494 494
495 495 ancestors = ancestor.commonancestorsheads(self.parentrevs, rev1, rev2)
496 496 return pycompat.maplist(self.node, ancestors)
497 497
498 498 def descendants(self, revs):
499 499 # TODO we could implement this using a recursive SQL query, which
500 500 # might be faster.
501 501 return dagop.descendantrevs(revs, self.revs, self.parentrevs)
502 502
503 503 def heads(self, start=None, stop=None):
504 504 if start is None and stop is None:
505 505 if not len(self):
506 506 return [sha1nodeconstants.nullid]
507 507
508 508 startrev = self.rev(start) if start is not None else nullrev
509 509 stoprevs = {self.rev(n) for n in stop or []}
510 510
511 511 revs = dagop.headrevssubset(
512 512 self.revs, self.parentrevs, startrev=startrev, stoprevs=stoprevs
513 513 )
514 514
515 515 return [self.node(rev) for rev in revs]
516 516
517 517 def children(self, node):
518 518 rev = self.rev(node)
519 519
520 520 res = self._db.execute(
521 521 'SELECT'
522 522 ' node '
523 523 ' FROM filedata '
524 524 ' WHERE path=? AND (p1rev=? OR p2rev=?) '
525 525 ' ORDER BY revnum ASC',
526 526 (self._path, rev, rev),
527 527 )
528 528
529 529 return [row[0] for row in res]
530 530
531 531 # End of ifileindex interface.
532 532
533 533 # Start of ifiledata interface.
534 534
535 535 def size(self, rev):
536 536 if rev == nullrev:
537 537 return 0
538 538
539 539 if rev not in self._revtonode:
540 540 raise IndexError(rev)
541 541
542 542 node = self._revtonode[rev]
543 543
544 544 if self.renamed(node):
545 545 return len(self.read(node))
546 546
547 547 return len(self.revision(node))
548 548
549 549 def revision(self, node, raw=False, _verifyhash=True):
550 550 if node in (sha1nodeconstants.nullid, nullrev):
551 551 return b''
552 552
553 553 if isinstance(node, int):
554 554 node = self.node(node)
555 555
556 556 if node not in self._nodetorev:
557 557 raise error.LookupError(node, self._path, _(b'no node'))
558 558
559 559 if node in self._revisioncache:
560 560 return self._revisioncache[node]
561 561
562 562 # Because we have a fulltext revision cache, we are able to
563 563 # short-circuit delta chain traversal and decompression as soon as
564 564 # we encounter a revision in the cache.
565 565
566 566 stoprids = {self._revisions[n].rid: n for n in self._revisioncache}
567 567
568 568 if not stoprids:
569 569 stoprids[-1] = None
570 570
571 571 fulltext = resolvedeltachain(
572 572 self._db,
573 573 self._pathid,
574 574 node,
575 575 self._revisioncache,
576 576 stoprids,
577 577 zstddctx=self._dctx,
578 578 )
579 579
580 580 # Don't verify hashes if parent nodes were rewritten, as the hash
581 581 # wouldn't verify.
582 582 if self._revisions[node].flags & (FLAG_MISSING_P1 | FLAG_MISSING_P2):
583 583 _verifyhash = False
584 584
585 585 if _verifyhash:
586 586 self._checkhash(fulltext, node)
587 587 self._revisioncache[node] = fulltext
588 588
589 589 return fulltext
590 590
591 591 def rawdata(self, *args, **kwargs):
592 592 return self.revision(*args, **kwargs)
593 593
594 594 def read(self, node):
595 595 return storageutil.filtermetadata(self.revision(node))
596 596
597 597 def renamed(self, node):
598 598 return storageutil.filerevisioncopied(self, node)
599 599
600 600 def cmp(self, node, fulltext):
601 601 return not storageutil.filedataequivalent(self, node, fulltext)
602 602
603 603 def emitrevisions(
604 604 self,
605 605 nodes,
606 606 nodesorder=None,
607 607 revisiondata=False,
608 608 assumehaveparentrevisions=False,
609 609 deltamode=repository.CG_DELTAMODE_STD,
610 610 sidedata_helpers=None,
611 611 debug_info=None,
612 612 ):
613 613 if nodesorder not in (b'nodes', b'storage', b'linear', None):
614 614 raise error.ProgrammingError(
615 615 b'unhandled value for nodesorder: %s' % nodesorder
616 616 )
617 617
618 618 nodes = [n for n in nodes if n != sha1nodeconstants.nullid]
619 619
620 620 if not nodes:
621 621 return
622 622
623 623 # TODO perform in a single query.
624 624 res = self._db.execute(
625 625 'SELECT revnum, deltaid FROM fileindex '
626 626 'WHERE pathid=? '
627 627 ' AND node in (%s)' % (','.join(['?'] * len(nodes))),
628 628 tuple([self._pathid] + nodes),
629 629 )
630 630
631 631 deltabases = {}
632 632
633 633 for rev, deltaid in res:
634 634 res = self._db.execute(
635 635 'SELECT revnum from fileindex WHERE pathid=? AND deltaid=?',
636 636 (self._pathid, deltaid),
637 637 )
638 638 deltabases[rev] = res.fetchone()[0]
639 639
640 640 # TODO define revdifffn so we can use delta from storage.
641 641 for delta in storageutil.emitrevisions(
642 642 self,
643 643 nodes,
644 644 nodesorder,
645 645 sqliterevisiondelta,
646 646 deltaparentfn=deltabases.__getitem__,
647 647 revisiondata=revisiondata,
648 648 assumehaveparentrevisions=assumehaveparentrevisions,
649 649 deltamode=deltamode,
650 650 sidedata_helpers=sidedata_helpers,
651 651 ):
652 652
653 653 yield delta
654 654
655 655 # End of ifiledata interface.
656 656
657 657 # Start of ifilemutation interface.
658 658
659 659 def add(self, filedata, meta, transaction, linkrev, p1, p2):
660 660 if meta or filedata.startswith(b'\x01\n'):
661 661 filedata = storageutil.packmeta(meta, filedata)
662 662
663 663 rev = self.addrevision(filedata, transaction, linkrev, p1, p2)
664 664 return self.node(rev)
665 665
666 666 def addrevision(
667 667 self,
668 668 revisiondata,
669 669 transaction,
670 670 linkrev,
671 671 p1,
672 672 p2,
673 673 node=None,
674 674 flags=0,
675 675 cachedelta=None,
676 676 ):
677 677 if flags:
678 678 raise SQLiteStoreError(_(b'flags not supported on revisions'))
679 679
680 680 validatehash = node is not None
681 681 node = node or storageutil.hashrevisionsha1(revisiondata, p1, p2)
682 682
683 683 if validatehash:
684 684 self._checkhash(revisiondata, node, p1, p2)
685 685
686 686 rev = self._nodetorev.get(node)
687 687 if rev is not None:
688 688 return rev
689 689
690 690 rev = self._addrawrevision(
691 691 node, revisiondata, transaction, linkrev, p1, p2
692 692 )
693 693
694 694 self._revisioncache[node] = revisiondata
695 695 return rev
696 696
697 697 def addgroup(
698 698 self,
699 699 deltas,
700 700 linkmapper,
701 701 transaction,
702 702 addrevisioncb=None,
703 703 duplicaterevisioncb=None,
704 704 maybemissingparents=False,
705 705 ):
706 706 empty = True
707 707
708 708 for (
709 709 node,
710 710 p1,
711 711 p2,
712 712 linknode,
713 713 deltabase,
714 714 delta,
715 715 wireflags,
716 716 sidedata,
717 717 ) in deltas:
718 718 storeflags = 0
719 719
720 720 if wireflags & repository.REVISION_FLAG_CENSORED:
721 721 storeflags |= FLAG_CENSORED
722 722
723 723 if wireflags & ~repository.REVISION_FLAG_CENSORED:
724 724 raise SQLiteStoreError(b'unhandled revision flag')
725 725
726 726 if maybemissingparents:
727 727 if p1 != sha1nodeconstants.nullid and not self.hasnode(p1):
728 728 p1 = sha1nodeconstants.nullid
729 729 storeflags |= FLAG_MISSING_P1
730 730
731 731 if p2 != sha1nodeconstants.nullid and not self.hasnode(p2):
732 732 p2 = sha1nodeconstants.nullid
733 733 storeflags |= FLAG_MISSING_P2
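# A hedged note: a parent recorded as nullid with a FLAG_MISSING_P* marker
# (e.g. with shallow file storage) is patched back to its real value by the
# "node in self._revisions" branch below once the true parent becomes known,
# and revision() skips hash verification for flagged entries because the
# stored parents differ from the real ones.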
734 734
735 735 baserev = self.rev(deltabase)
736 736
737 737 # If base is censored, delta must be full replacement in a single
738 738 # patch operation.
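# (The bdiff patch header is three big-endian 32-bit integers: start, end,
# and length of the replacement data. A single-hunk full replacement of the
# censored base therefore starts with (0, oldlen, newlen), which is what
# mdiff.replacediffheader() produces and what is compared below.)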
739 739 if baserev != nullrev and self.iscensored(baserev):
740 740 hlen = struct.calcsize(b'>lll')
741 741 oldlen = len(self.rawdata(deltabase, _verifyhash=False))
742 742 newlen = len(delta) - hlen
743 743
744 744 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
745 745 raise error.CensoredBaseError(self._path, deltabase)
746 746
747 747 if not (storeflags & FLAG_CENSORED) and storageutil.deltaiscensored(
748 748 delta, baserev, lambda x: len(self.rawdata(x))
749 749 ):
750 750 storeflags |= FLAG_CENSORED
751 751
752 752 linkrev = linkmapper(linknode)
753 753
754 754 if node in self._revisions:
755 755 # Possibly reset parents to make them proper.
756 756 entry = self._revisions[node]
757 757
758 758 if (
759 759 entry.flags & FLAG_MISSING_P1
760 760 and p1 != sha1nodeconstants.nullid
761 761 ):
762 762 entry.p1node = p1
763 763 entry.p1rev = self._nodetorev[p1]
764 764 entry.flags &= ~FLAG_MISSING_P1
765 765
766 766 self._db.execute(
767 767 'UPDATE fileindex SET p1rev=?, flags=? WHERE id=?',
768 768 (self._nodetorev[p1], entry.flags, entry.rid),
769 769 )
770 770
771 771 if (
772 772 entry.flags & FLAG_MISSING_P2
773 773 and p2 != sha1nodeconstants.nullid
774 774 ):
775 775 entry.p2node = p2
776 776 entry.p2rev = self._nodetorev[p2]
777 777 entry.flags &= ~FLAG_MISSING_P2
778 778
779 779 self._db.execute(
780 780 'UPDATE fileindex SET p2rev=?, flags=? WHERE id=?',
781 781 (self._nodetorev[p2], entry.flags, entry.rid),
782 782 )
783 783
784 784 if duplicaterevisioncb:
785 785 duplicaterevisioncb(self, self.rev(node))
786 786 empty = False
787 787 continue
788 788
789 789 if deltabase == sha1nodeconstants.nullid:
790 790 text = mdiff.patch(b'', delta)
791 791 storedelta = None
792 792 else:
793 793 text = None
794 794 storedelta = (deltabase, delta)
795 795
796 796 rev = self._addrawrevision(
797 797 node,
798 798 text,
799 799 transaction,
800 800 linkrev,
801 801 p1,
802 802 p2,
803 803 storedelta=storedelta,
804 804 flags=storeflags,
805 805 )
806 806
807 807 if addrevisioncb:
808 808 addrevisioncb(self, rev)
809 809 empty = False
810 810
811 811 return not empty
812 812
813 813 def censorrevision(self, tr, censornode, tombstone=b''):
814 814 tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
815 815
816 816 # This restriction is cargo culted from revlogs and makes no sense for
817 817 # SQLite, since columns can be resized at will.
818 818 if len(tombstone) > len(self.rawdata(censornode)):
819 819 raise error.Abort(
820 820 _(b'censor tombstone must be no longer than censored data')
821 821 )
822 822
823 823 # We need to replace the censored revision's data with the tombstone.
824 824 # But replacing that data will have implications for delta chains that
825 825 # reference it.
826 826 #
827 827 # While "better," more complex strategies are possible, we do something
828 828 # simple: we find delta chain children of the censored revision and we
829 829 # replace those incremental deltas with fulltexts of their corresponding
830 830 # revision. Then we delete the now-unreferenced delta and original
831 831 # revision and insert a replacement.
832 832
833 833 # Find the delta to be censored.
834 834 censoreddeltaid = self._db.execute(
835 835 'SELECT deltaid FROM fileindex WHERE id=?',
836 836 (self._revisions[censornode].rid,),
837 837 ).fetchone()[0]
838 838
839 839 # Find all its delta chain children.
840 840 # TODO once we support storing deltas for !files, we'll need to look
841 841 # for those delta chains too.
842 842 rows = list(
843 843 self._db.execute(
844 844 'SELECT id, pathid, node FROM fileindex '
845 845 'WHERE deltabaseid=? OR deltaid=?',
846 846 (censoreddeltaid, censoreddeltaid),
847 847 )
848 848 )
849 849
850 850 for row in rows:
851 851 rid, pathid, node = row
852 852
853 853 fulltext = resolvedeltachain(
854 854 self._db, pathid, node, {}, {-1: None}, zstddctx=self._dctx
855 855 )
856 856
857 857 deltahash = hashutil.sha1(fulltext).digest()
858 858
859 859 if self._compengine == b'zstd':
860 860 deltablob = self._cctx.compress(fulltext)
861 861 compression = COMPRESSION_ZSTD
862 862 elif self._compengine == b'zlib':
863 863 deltablob = zlib.compress(fulltext)
864 864 compression = COMPRESSION_ZLIB
865 865 elif self._compengine == b'none':
866 866 deltablob = fulltext
867 867 compression = COMPRESSION_NONE
868 868 else:
869 869 raise error.ProgrammingError(
870 870 b'unhandled compression engine: %s' % self._compengine
871 871 )
872 872
873 873 if len(deltablob) >= len(fulltext):
874 874 deltablob = fulltext
875 875 compression = COMPRESSION_NONE
876 876
877 877 deltaid = insertdelta(self._db, compression, deltahash, deltablob)
878 878
879 879 self._db.execute(
880 880 'UPDATE fileindex SET deltaid=?, deltabaseid=NULL '
881 881 'WHERE id=?',
882 882 (deltaid, rid),
883 883 )
884 884
885 885 # Now create the tombstone delta and replace the delta on the censored
886 886 # node.
887 887 deltahash = hashutil.sha1(tombstone).digest()
888 888 tombstonedeltaid = insertdelta(
889 889 self._db, COMPRESSION_NONE, deltahash, tombstone
890 890 )
891 891
892 892 flags = self._revisions[censornode].flags
893 893 flags |= FLAG_CENSORED
894 894
895 895 self._db.execute(
896 896 'UPDATE fileindex SET flags=?, deltaid=?, deltabaseid=NULL '
897 897 'WHERE pathid=? AND node=?',
898 898 (flags, tombstonedeltaid, self._pathid, censornode),
899 899 )
900 900
901 901 self._db.execute('DELETE FROM delta WHERE id=?', (censoreddeltaid,))
902 902
903 903 self._refreshindex()
904 904 self._revisioncache.clear()
905 905
906 906 def getstrippoint(self, minlink):
907 907 return storageutil.resolvestripinfo(
908 908 minlink,
909 909 len(self) - 1,
910 910 [self.rev(n) for n in self.heads()],
911 911 self.linkrev,
912 912 self.parentrevs,
913 913 )
914 914
915 915 def strip(self, minlink, transaction):
916 916 if not len(self):
917 917 return
918 918
919 919 rev, _ignored = self.getstrippoint(minlink)
920 920
921 921 if rev == len(self):
922 922 return
923 923
924 924 for rev in self.revs(rev):
925 925 self._db.execute(
926 926 'DELETE FROM fileindex WHERE pathid=? AND node=?',
927 927 (self._pathid, self.node(rev)),
928 928 )
929 929
930 930 # TODO how should we garbage collect data in delta table?
931 931
932 932 self._refreshindex()
933 933
934 934 # End of ifilemutation interface.
935 935
936 936 # Start of ifilestorage interface.
937 937
938 938 def files(self):
939 939 return []
940 940
941 941 def sidedata(self, nodeorrev, _df=None):
942 942 # Not supported for now
943 943 return {}
944 944
945 945 def storageinfo(
946 946 self,
947 947 exclusivefiles=False,
948 948 sharedfiles=False,
949 949 revisionscount=False,
950 950 trackedsize=False,
951 951 storedsize=False,
952 952 ):
953 953 d = {}
954 954
955 955 if exclusivefiles:
956 956 d[b'exclusivefiles'] = []
957 957
958 958 if sharedfiles:
959 959 # TODO list sqlite file(s) here.
960 960 d[b'sharedfiles'] = []
961 961
962 962 if revisionscount:
963 963 d[b'revisionscount'] = len(self)
964 964
965 965 if trackedsize:
966 966 d[b'trackedsize'] = sum(
967 967 len(self.revision(node)) for node in self._nodetorev
968 968 )
969 969
970 970 if storedsize:
971 971 # TODO implement this?
972 972 d[b'storedsize'] = None
973 973
974 974 return d
975 975
976 976 def verifyintegrity(self, state):
977 977 state[b'skipread'] = set()
978 978
979 979 for rev in self:
980 980 node = self.node(rev)
981 981
982 982 try:
983 983 self.revision(node)
984 984 except Exception as e:
985 985 yield sqliteproblem(
986 986 error=_(b'unpacking %s: %s') % (short(node), e), node=node
987 987 )
988 988
989 989 state[b'skipread'].add(node)
990 990
991 991 # End of ifilestorage interface.
992 992
993 993 def _checkhash(self, fulltext, node, p1=None, p2=None):
994 994 if p1 is None and p2 is None:
995 995 p1, p2 = self.parents(node)
996 996
997 997 if node == storageutil.hashrevisionsha1(fulltext, p1, p2):
998 998 return
999 999
1000 1000 try:
1001 1001 del self._revisioncache[node]
1002 1002 except KeyError:
1003 1003 pass
1004 1004
1005 1005 if storageutil.iscensoredtext(fulltext):
1006 1006 raise error.CensoredNodeError(self._path, node, fulltext)
1007 1007
1008 1008 raise SQLiteStoreError(_(b'integrity check failed on %s') % self._path)
1009 1009
1010 1010 def _addrawrevision(
1011 1011 self,
1012 1012 node,
1013 1013 revisiondata,
1014 1014 transaction,
1015 1015 linkrev,
1016 1016 p1,
1017 1017 p2,
1018 1018 storedelta=None,
1019 1019 flags=0,
1020 1020 ):
1021 1021 if self._pathid is None:
1022 1022 res = self._db.execute(
1023 1023 'INSERT INTO filepath (path) VALUES (?)', (self._path,)
1024 1024 )
1025 1025 self._pathid = res.lastrowid
1026 1026
1027 1027 # For simplicity, always store a delta against p1.
1028 1028 # TODO we need a lot more logic here to make behavior reasonable.
1029 1029
1030 1030 if storedelta:
1031 1031 deltabase, delta = storedelta
1032 1032
1033 1033 if isinstance(deltabase, int):
1034 1034 deltabase = self.node(deltabase)
1035 1035
1036 1036 else:
1037 1037 assert revisiondata is not None
1038 1038 deltabase = p1
1039 1039
1040 1040 if deltabase == sha1nodeconstants.nullid:
1041 1041 delta = revisiondata
1042 1042 else:
1043 1043 delta = mdiff.textdiff(
1044 1044 self.revision(self.rev(deltabase)), revisiondata
1045 1045 )
1046 1046
1047 1047 # File index stores a pointer to its delta and the parent delta.
1048 1048 # The parent delta is stored via a pointer to the fileindex PK.
1049 1049 if deltabase == sha1nodeconstants.nullid:
1050 1050 baseid = None
1051 1051 else:
1052 1052 baseid = self._revisions[deltabase].rid
1053 1053
1054 1054 # Deltas are stored with a hash of their content. This allows
1055 1055 # us to de-duplicate. The table is configured to ignore conflicts
1056 1056 # and it is faster to just insert and silently noop than to look
1057 1057 # first.
1058 1058 deltahash = hashutil.sha1(delta).digest()
1059 1059
1060 1060 if self._compengine == b'zstd':
1061 1061 deltablob = self._cctx.compress(delta)
1062 1062 compression = COMPRESSION_ZSTD
1063 1063 elif self._compengine == b'zlib':
1064 1064 deltablob = zlib.compress(delta)
1065 1065 compression = COMPRESSION_ZLIB
1066 1066 elif self._compengine == b'none':
1067 1067 deltablob = delta
1068 1068 compression = COMPRESSION_NONE
1069 1069 else:
1070 1070 raise error.ProgrammingError(
1071 1071 b'unhandled compression engine: %s' % self._compengine
1072 1072 )
1073 1073
1074 1074 # Don't store compressed data if it isn't practical.
1075 1075 if len(deltablob) >= len(delta):
1076 1076 deltablob = delta
1077 1077 compression = COMPRESSION_NONE
1078 1078
1079 1079 deltaid = insertdelta(self._db, compression, deltahash, deltablob)
1080 1080
1081 1081 rev = len(self)
1082 1082
1083 1083 if p1 == sha1nodeconstants.nullid:
1084 1084 p1rev = nullrev
1085 1085 else:
1086 1086 p1rev = self._nodetorev[p1]
1087 1087
1088 1088 if p2 == sha1nodeconstants.nullid:
1089 1089 p2rev = nullrev
1090 1090 else:
1091 1091 p2rev = self._nodetorev[p2]
1092 1092
1093 1093 rid = self._db.execute(
1094 1094 'INSERT INTO fileindex ('
1095 1095 ' pathid, revnum, node, p1rev, p2rev, linkrev, flags, '
1096 1096 ' deltaid, deltabaseid) '
1097 1097 ' VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)',
1098 1098 (
1099 1099 self._pathid,
1100 1100 rev,
1101 1101 node,
1102 1102 p1rev,
1103 1103 p2rev,
1104 1104 linkrev,
1105 1105 flags,
1106 1106 deltaid,
1107 1107 baseid,
1108 1108 ),
1109 1109 ).lastrowid
1110 1110
1111 1111 entry = revisionentry(
1112 1112 rid=rid,
1113 1113 rev=rev,
1114 1114 node=node,
1115 1115 p1rev=p1rev,
1116 1116 p2rev=p2rev,
1117 1117 p1node=p1,
1118 1118 p2node=p2,
1119 1119 linkrev=linkrev,
1120 1120 flags=flags,
1121 1121 )
1122 1122
1123 1123 self._nodetorev[node] = rev
1124 1124 self._revtonode[rev] = node
1125 1125 self._revisions[node] = entry
1126 1126
1127 1127 return rev
1128 1128
1129 1129
1130 1130 class sqliterepository(localrepo.localrepository):
1131 1131 def cancopy(self):
1132 1132 return False
1133 1133
1134 1134 def transaction(self, *args, **kwargs):
1135 1135 current = self.currenttransaction()
1136 1136
1137 1137 tr = super(sqliterepository, self).transaction(*args, **kwargs)
1138 1138
1139 1139 if current:
1140 1140 return tr
1141 1141
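# Only the outermost Mercurial transaction reaches this point (nested
# calls returned above), so a single SQLite transaction brackets it; the
# matching commit is performed by the finalizer registered below.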
1142 1142 self._dbconn.execute('BEGIN TRANSACTION')
1143 1143
1144 1144 def committransaction(_):
1145 1145 self._dbconn.commit()
1146 1146
1147 1147 tr.addfinalize(b'sqlitestore', committransaction)
1148 1148
1149 1149 return tr
1150 1150
1151 1151 @property
1152 1152 def _dbconn(self):
1153 1153 # SQLite connections can only be used on the thread that created
1154 1154 # them. In most cases, this "just works." However, hgweb uses
1155 1155 # multiple threads.
1156 1156 tid = threading.current_thread().ident
1157 1157
1158 1158 if self._db:
1159 1159 if self._db[0] == tid:
1160 1160 return self._db[1]
1161 1161
1162 1162 db = makedb(self.svfs.join(b'db.sqlite'))
1163 1163 self._db = (tid, db)
1164 1164
1165 1165 return db
1166 1166
1167 1167
1168 1168 def makedb(path):
1169 1169 """Construct a database handle for a database at path."""
1170 1170
1171 1171 db = sqlite3.connect(encoding.strfromlocal(path))
1172 1172 db.text_factory = bytes
1173 1173
1174 1174 res = db.execute('PRAGMA user_version').fetchone()[0]
1175 1175
1176 1176 # New database.
1177 1177 if res == 0:
1178 1178 for statement in CREATE_SCHEMA:
1179 1179 db.execute(statement)
1180 1180
1181 1181 db.commit()
1182 1182
1183 1183 elif res == CURRENT_SCHEMA_VERSION:
1184 1184 pass
1185 1185
1186 1186 else:
1187 1187 raise error.Abort(_(b'sqlite database has unrecognized version'))
1188 1188
1189 1189 db.execute('PRAGMA journal_mode=WAL')
1190 1190
1191 1191 return db
1192 1192
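# A hedged usage sketch (the path below is hypothetical; in practice
# sqliterepository._dbconn passes self.svfs.join(b'db.sqlite')):
#
#   db = makedb(b'/path/to/repo/.hg/store/db.sqlite')
#   db.execute('SELECT COUNT(*) FROM filedata').fetchone()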
1193 1193
1194 1194 def featuresetup(ui, supported):
1195 1195 supported.add(REQUIREMENT)
1196 1196
1197 1197 if zstd:
1198 1198 supported.add(REQUIREMENT_ZSTD)
1199 1199
1200 1200 supported.add(REQUIREMENT_ZLIB)
1201 1201 supported.add(REQUIREMENT_NONE)
1202 1202 supported.add(REQUIREMENT_SHALLOW_FILES)
1203 1203 supported.add(requirements.NARROW_REQUIREMENT)
1204 1204
1205 1205
1206 1206 def newreporequirements(orig, ui, createopts):
1207 1207 if createopts[b'backend'] != b'sqlite':
1208 1208 return orig(ui, createopts)
1209 1209
1210 1210 # This restriction can be lifted once we have more confidence.
1211 1211 if b'sharedrepo' in createopts:
1212 1212 raise error.Abort(
1213 1213 _(b'shared repositories not supported with SQLite store')
1214 1214 )
1215 1215
1216 1216 # This filtering is out of an abundance of caution: we want to ensure
1217 1217 # we honor creation options and we do that by annotating exactly the
1218 1218 # creation options we recognize.
1219 1219 known = {
1220 1220 b'narrowfiles',
1221 1221 b'backend',
1222 1222 b'shallowfilestore',
1223 1223 }
1224 1224
1225 1225 unsupported = set(createopts) - known
1226 1226 if unsupported:
1227 1227 raise error.Abort(
1228 1228 _(b'SQLite store does not support repo creation option: %s')
1229 1229 % b', '.join(sorted(unsupported))
1230 1230 )
1231 1231
1232 1232 # Since we're a hybrid store that still relies on revlogs, we fall back
1233 1233 # to using the revlogv1 backend's storage requirements then adding our
1234 1234 # own requirement.
1235 1235 createopts[b'backend'] = b'revlogv1'
1236 1236 requirements = orig(ui, createopts)
1237 1237 requirements.add(REQUIREMENT)
1238 1238
1239 1239 compression = ui.config(b'storage', b'sqlite.compression')
1240 1240
1241 1241 if compression == b'zstd' and not zstd:
1242 1242 raise error.Abort(
1243 1243 _(
1244 1244 b'storage.sqlite.compression set to "zstd" but '
1245 1245 b'zstandard compression not available to this '
1246 1246 b'Mercurial install'
1247 1247 )
1248 1248 )
1249 1249
1250 1250 if compression == b'zstd':
1251 1251 requirements.add(REQUIREMENT_ZSTD)
1252 1252 elif compression == b'zlib':
1253 1253 requirements.add(REQUIREMENT_ZLIB)
1254 1254 elif compression == b'none':
1255 1255 requirements.add(REQUIREMENT_NONE)
1256 1256 else:
1257 1257 raise error.Abort(
1258 1258 _(
1259 1259 b'unknown compression engine defined in '
1260 1260 b'storage.sqlite.compression: %s'
1261 1261 )
1262 1262 % compression
1263 1263 )
1264 1264
1265 1265 if createopts.get(b'shallowfilestore'):
1266 1266 requirements.add(REQUIREMENT_SHALLOW_FILES)
1267 1267
1268 1268 return requirements
1269 1269
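# As an illustration: with the defaults on a zstd-capable install, a newly
# created repository advertises the usual revlogv1 requirements plus
# b'exp-sqlite-001' and b'exp-sqlite-comp-001=zstd' (and
# b'exp-sqlite-shallow-files' when shallowfilestore was requested).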
1270 1270
1271 1271 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1272 1272 class sqlitefilestorage:
1273 1273 """Repository file storage backed by SQLite."""
1274 1274
1275 1275 def file(self, path):
1276 1276 if path[0] == b'/':
1277 1277 path = path[1:]
1278 1278
1279 1279 if REQUIREMENT_ZSTD in self.requirements:
1280 1280 compression = b'zstd'
1281 1281 elif REQUIREMENT_ZLIB in self.requirements:
1282 1282 compression = b'zlib'
1283 1283 elif REQUIREMENT_NONE in self.requirements:
1284 1284 compression = b'none'
1285 1285 else:
1286 1286 raise error.Abort(
1287 1287 _(
1288 1288 b'unable to determine what compression engine '
1289 1289 b'to use for SQLite storage'
1290 1290 )
1291 1291 )
1292 1292
1293 1293 return sqlitefilestore(self._dbconn, path, compression)
1294 1294
1295 1295
1296 1296 def makefilestorage(orig, requirements, features, **kwargs):
1297 1297 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1298 1298 if REQUIREMENT in requirements:
1299 1299 if REQUIREMENT_SHALLOW_FILES in requirements:
1300 1300 features.add(repository.REPO_FEATURE_SHALLOW_FILE_STORAGE)
1301 1301
1302 1302 return sqlitefilestorage
1303 1303 else:
1304 1304 return orig(requirements=requirements, features=features, **kwargs)
1305 1305
1306 1306
1307 1307 def makemain(orig, ui, requirements, **kwargs):
1308 1308 if REQUIREMENT in requirements:
1309 1309 if REQUIREMENT_ZSTD in requirements and not zstd:
1310 1310 raise error.Abort(
1311 1311 _(
1312 1312 b'repository uses zstandard compression, which '
1313 1313 b'is not available to this Mercurial install'
1314 1314 )
1315 1315 )
1316 1316
1317 1317 return sqliterepository
1318 1318
1319 1319 return orig(requirements=requirements, **kwargs)
1320 1320
1321 1321
1322 1322 def verifierinit(orig, self, *args, **kwargs):
1323 1323 orig(self, *args, **kwargs)
1324 1324
1325 1325 # We don't care that files in the store don't align with what is
1326 1326 # advertised. So suppress these warnings.
1327 1327 self.warnorphanstorefiles = False
1328 1328
1329 1329
1330 1330 def extsetup(ui):
1331 1331 localrepo.featuresetupfuncs.add(featuresetup)
1332 1332 extensions.wrapfunction(
1333 1333 localrepo, b'newreporequirements', newreporequirements
1334 1334 )
1335 extensions.wrapfunction(localrepo, b'makefilestorage', makefilestorage)
1336 extensions.wrapfunction(localrepo, b'makemain', makemain)
1337 extensions.wrapfunction(verify.verifier, b'__init__', verifierinit)
1335 extensions.wrapfunction(localrepo, 'makefilestorage', makefilestorage)
1336 extensions.wrapfunction(localrepo, 'makemain', makemain)
1337 extensions.wrapfunction(verify.verifier, '__init__', verifierinit)
1338 1338
1339 1339
1340 1340 def reposetup(ui, repo):
1341 1341 if isinstance(repo, sqliterepository):
1342 1342 repo._db = None
1343 1343
1344 1344 # TODO check for bundlerepository?