##// END OF EJS Templates
filelog: declare that filelog implements a storage interface...
Gregory Szorc -
r37459:a3202fa8 default
parent child Browse files
Show More
@@ -1,139 +1,144 b''
1 1 # filelog.py - file history class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import re
11 11 import struct
12 12
13 from .thirdparty.zope import (
14 interface as zi,
15 )
13 16 from . import (
14 17 error,
15 18 mdiff,
19 repository,
16 20 revlog,
17 21 )
18 22
_mdre = re.compile('\1\n')
def parsemeta(text):
    """return (metadatadict, metadatasize)"""
    # text can be a buffer, so stick to slicing and the compiled regex
    # rather than str-only APIs like .startswith() or .index().
    if text[:2] != '\1\n':
        return None, None
    end = _mdre.search(text, 2).start()
    meta = {}
    for line in text[2:end].splitlines():
        key, value = line.split(": ", 1)
        meta[key] = value
    return meta, end + 2
32 36
def packmeta(meta, text):
    """Wrap ``text`` in a \\1\\n metadata envelope with sorted keys."""
    lines = ["%s: %s\n" % (key, meta[key]) for key in sorted(meta)]
    return "\1\n%s\1\n%s" % ("".join(lines), text)
37 41
def _censoredtext(text):
    """Return truthy if ``text`` carries a "censored" metadata key."""
    meta = parsemeta(text)[0]
    return meta and "censored" in meta
41 45
@zi.implementer(repository.ifilestorage)
class filelog(revlog.revlog):
    """Storage for a single tracked file, layered on a revlog.

    Revision fulltexts may be prefixed with a ``\\1\\n...\\1\\n`` metadata
    envelope carrying copy/censor information; several methods below exist
    to add, strip, or inspect that envelope.
    """

    def __init__(self, opener, path):
        # Revlog index for tracked file ``path`` lives at data/<path>.i.
        super(filelog, self).__init__(opener,
                                      "/".join(("data", path + ".i")))
        # full name of the user visible file, relative to the repository root
        self.filename = path

    def read(self, node):
        # Return the file data for ``node`` with any metadata envelope
        # stripped off.
        t = self.revision(node)
        if not t.startswith('\1\n'):
            return t
        # Skip past the closing \1\n of the metadata block.
        s = t.index('\1\n', 2)
        return t[s + 2:]

    def add(self, text, meta, transaction, link, p1=None, p2=None):
        # Wrap the text in a metadata envelope when metadata is present, or
        # when the raw data itself starts with the marker (escaping).
        if meta or text.startswith('\1\n'):
            text = packmeta(meta, text)
        return self.addrevision(text, transaction, link, p1, p2)

    def renamed(self, node):
        # Return (copysource, copyrev-node) if this revision records a copy,
        # else False. Only parentless-on-p1 revisions can carry copy info.
        if self.parents(node)[0] != revlog.nullid:
            return False
        t = self.revision(node)
        m = parsemeta(t)[0]
        if m and "copy" in m:
            return (m["copy"], revlog.bin(m["copyrev"]))
        return False

    def size(self, rev):
        """return the size of a given revision"""

        # for revisions with renames, we have to go the slow way
        node = self.node(rev)
        if self.renamed(node):
            return len(self.read(node))
        if self.iscensored(rev):
            return 0

        # XXX if self.read(node).startswith("\1\n"), this returns (size+4)
        return super(filelog, self).size(rev)

    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different than what is stored.
        """

        t = text
        if text.startswith('\1\n'):
            # Escape a leading marker the same way add() would, so the hash
            # comparison is against the stored (escaped) form.
            t = '\1\n\1\n' + text

        samehashes = not super(filelog, self).cmp(node, t)
        if samehashes:
            return False

        # censored files compare against the empty file
        if self.iscensored(self.rev(node)):
            return text != ''

        # renaming a file produces a different hash, even if the data
        # remains unchanged. Check if it's the case (slow):
        if self.renamed(node):
            t2 = self.read(node)
            return t2 != text

        return True

    def checkhash(self, text, node, p1=None, p2=None, rev=None):
        # Like revlog.checkhash(), but reports censored revisions with a
        # dedicated error type instead of a generic integrity failure.
        try:
            super(filelog, self).checkhash(text, node, p1=p1, p2=p2, rev=rev)
        except error.RevlogError:
            if _censoredtext(text):
                raise error.CensoredNodeError(self.indexfile, node, text)
            raise

    def iscensored(self, rev):
        """Check if a file revision is censored."""
        return self.flags(rev) & revlog.REVIDX_ISCENSORED

    def _peek_iscensored(self, baserev, delta, flush):
        """Quickly check if a delta produces a censored revision."""
        # Fragile heuristic: unless new file meta keys are added alphabetically
        # preceding "censored", all censored revisions are prefixed by
        # "\1\ncensored:". A delta producing such a censored revision must be a
        # full-replacement delta, so we inspect the first and only patch in the
        # delta for this prefix.
        hlen = struct.calcsize(">lll")
        if len(delta) <= hlen:
            return False

        oldlen = self.rawsize(baserev)
        newlen = len(delta) - hlen
        if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
            return False

        add = "\1\ncensored:"
        addlen = len(add)
        return newlen >= addlen and delta[hlen:hlen + addlen] == add
@@ -1,685 +1,690 b''
1 1 # simplestorerepo.py - Extension that swaps in alternate repository storage.
2 2 #
3 3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 # To use this with the test suite:
9 9 #
10 10 # $ HGREPOFEATURES="simplestore" ./run-tests.py \
11 11 # --extra-config-opt extensions.simplestore=`pwd`/simplestorerepo.py
12 12
13 13 from __future__ import absolute_import
14 14
15 15 import stat
16 16
17 17 from mercurial.i18n import _
18 18 from mercurial.node import (
19 19 bin,
20 20 hex,
21 21 nullid,
22 22 nullrev,
23 23 )
24 24 from mercurial.thirdparty import (
25 25 cbor,
26 26 )
27 from mercurial.thirdparty.zope import (
28 interface as zi,
29 )
27 30 from mercurial import (
28 31 ancestor,
29 32 bundlerepo,
30 33 error,
31 34 extensions,
32 35 filelog,
33 36 localrepo,
34 37 mdiff,
35 38 pycompat,
39 repository,
36 40 revlog,
37 41 store,
38 42 verify,
39 43 )
40 44
41 45 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
42 46 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
43 47 # be specifying the version(s) of Mercurial they are tested with, or
44 48 # leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

# Repository requirement advertised by and required for this extension.
REQUIREMENT = 'testonly-simplestore'
48 52
def validatenode(node):
    """Raise ValueError unless ``node`` is a 20 byte binary node."""
    if isinstance(node, int):
        raise ValueError('expected node; got int')

    if len(node) != 20:
        raise ValueError('expected 20 byte node')
55 59
def validaterev(rev):
    """Raise ValueError unless ``rev`` is an integer revision number."""
    if not isinstance(rev, int):
        raise ValueError('expected int')
59 63
@zi.implementer(repository.ifilestorage)
class filestorage(object):
    """Implements storage for a tracked path.

    Data is stored in the VFS in a directory corresponding to the tracked
    path.

    Index data is stored in an ``index`` file using CBOR.

    Fulltext data is stored in files having names of the node.
    """

    def __init__(self, svfs, path):
        self._svfs = svfs
        self._path = path

        self._storepath = b'/'.join([b'data', path])
        self._indexpath = b'/'.join([self._storepath, b'index'])

        indexdata = self._svfs.tryread(self._indexpath)
        if indexdata:
            indexdata = cbor.loads(indexdata)

        self._indexdata = indexdata or []
        self._indexbynode = {}
        self._indexbyrev = {}
        self.index = []
        self._refreshindex()

        # This is used by changegroup code :/
        self._generaldelta = True
        self.storedeltachains = False

        self.version = 1

    def _refreshindex(self):
        # Rebuild the derived lookup structures (by-node map, by-rev map and
        # the revlog-shaped ``index`` list) from self._indexdata.
        self._indexbynode.clear()
        self._indexbyrev.clear()
        self.index = []

        for i, entry in enumerate(self._indexdata):
            self._indexbynode[entry[b'node']] = entry
            self._indexbyrev[i] = entry

        # Synthesize entries for the null node/revision so lookups behave
        # like a revlog.
        self._indexbynode[nullid] = {
            b'node': nullid,
            b'p1': nullid,
            b'p2': nullid,
            b'linkrev': nullrev,
            b'flags': 0,
        }

        self._indexbyrev[nullrev] = {
            b'node': nullid,
            b'p1': nullid,
            b'p2': nullid,
            b'linkrev': nullrev,
            b'flags': 0,
        }

        for i, entry in enumerate(self._indexdata):
            p1rev, p2rev = self.parentrevs(self.rev(entry[b'node']))

            # start, length, rawsize, chainbase, linkrev, p1, p2, node
            self.index.append((0, 0, 0, -1, entry[b'linkrev'], p1rev, p2rev,
                               entry[b'node']))

        self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))

    def __len__(self):
        return len(self._indexdata)

    def __iter__(self):
        return iter(range(len(self)))

    def revs(self, start=0, stop=None):
        # Iterate revision numbers; supports reversed iteration when
        # start > stop, mirroring revlog.revs().
        step = 1
        if stop is not None:
            if start > stop:
                step = -1

            stop += step
        else:
            stop = len(self)

        return range(start, stop, step)

    def parents(self, node):
        """Return the (p1, p2) parent nodes of ``node``."""
        validatenode(node)

        if node not in self._indexbynode:
            raise KeyError('unknown node')

        entry = self._indexbynode[node]

        return entry[b'p1'], entry[b'p2']

    def parentrevs(self, rev):
        """Return the (p1rev, p2rev) parent revisions of ``rev``."""
        p1, p2 = self.parents(self._indexbyrev[rev][b'node'])
        return self.rev(p1), self.rev(p2)

    def rev(self, node):
        """Return the revision number for ``node``, raising LookupError."""
        validatenode(node)

        try:
            self._indexbynode[node]
        except KeyError:
            raise error.LookupError(node, self._indexpath, _('no node'))

        # Linear scan of the by-rev map to find the matching revision.
        for rev, entry in self._indexbyrev.items():
            if entry[b'node'] == node:
                return rev

        raise error.ProgrammingError('this should not occur')

    def node(self, rev):
        """Return the node for revision number ``rev``."""
        validaterev(rev)

        return self._indexbyrev[rev][b'node']

    def lookup(self, node):
        # Resolve an int rev, binary node, rev-as-string, or hex node to a
        # binary node, like revlog.lookup().
        if isinstance(node, int):
            return self.node(node)

        if len(node) == 20:
            self.rev(node)
            return node

        try:
            rev = int(node)
            if '%d' % rev != node:
                raise ValueError

            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError

            return self.node(rev)
        except (ValueError, OverflowError):
            pass

        if len(node) == 40:
            try:
                rawnode = bin(node)
                self.rev(rawnode)
                return rawnode
            except TypeError:
                pass

        raise error.LookupError(node, self._path, _('invalid lookup input'))

    def linkrev(self, rev):
        """Return the changelog revision this file revision is linked to."""
        validaterev(rev)

        return self._indexbyrev[rev][b'linkrev']

    def flags(self, rev):
        """Return the storage flags for ``rev``."""
        validaterev(rev)

        return self._indexbyrev[rev][b'flags']

    def deltaparent(self, rev):
        # This store keeps fulltexts, so report p1 as the nominal delta base.
        validaterev(rev)

        p1node = self.parents(self.node(rev))[0]
        return self.rev(p1node)

    def candelta(self, baserev, rev):
        # Deltas are disallowed against/for revisions whose rawtext may be
        # transformed by flag processors.
        validaterev(baserev)
        validaterev(rev)

        if ((self.flags(baserev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)
            or (self.flags(rev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)):
            return False

        return True

    def rawsize(self, rev):
        """Return the size of the raw (untransformed) text for ``rev``."""
        validaterev(rev)
        node = self.node(rev)
        return len(self.revision(node, raw=True))

    def _processflags(self, text, flags, operation, raw=False):
        # Apply registered flag processor transforms to ``text``; returns
        # (text, validatehash). Mirrors revlog._processflags().
        if flags == 0:
            return text, True

        if flags & ~revlog.REVIDX_KNOWN_FLAGS:
            raise error.RevlogError(_("incompatible revision flag '%#x'") %
                                    (flags & ~revlog.REVIDX_KNOWN_FLAGS))

        validatehash = True
        # Depending on the operation (read or write), the order might be
        # reversed due to non-commutative transforms.
        orderedflags = revlog.REVIDX_FLAGS_ORDER
        if operation == 'write':
            orderedflags = reversed(orderedflags)

        for flag in orderedflags:
            # If a flagprocessor has been registered for a known flag, apply the
            # related operation transform and update result tuple.
            if flag & flags:
                vhash = True

                if flag not in revlog._flagprocessors:
                    message = _("missing processor for flag '%#x'") % (flag)
                    raise revlog.RevlogError(message)

                processor = revlog._flagprocessors[flag]
                if processor is not None:
                    readtransform, writetransform, rawtransform = processor

                    if raw:
                        vhash = rawtransform(self, text)
                    elif operation == 'read':
                        text, vhash = readtransform(self, text)
                    else: # write operation
                        text, vhash = writetransform(self, text)
                validatehash = validatehash and vhash

        return text, validatehash

    def checkhash(self, text, node, p1=None, p2=None, rev=None):
        """Verify ``text`` hashes to ``node`` given its parents."""
        if p1 is None and p2 is None:
            p1, p2 = self.parents(node)
        if node != revlog.hash(text, p1, p2):
            raise error.RevlogError(_("integrity check failed on %s") %
                                    self._path)

    def revision(self, node, raw=False):
        """Return the (possibly transformed) fulltext for ``node``."""
        validatenode(node)

        if node == nullid:
            return b''

        rev = self.rev(node)
        flags = self.flags(rev)

        # Fulltexts live in per-node files under the store path.
        path = b'/'.join([self._storepath, hex(node)])
        rawtext = self._svfs.read(path)

        text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
        if validatehash:
            self.checkhash(text, node, rev=rev)

        return text

    def read(self, node):
        """Return file data for ``node`` with copy metadata stripped."""
        validatenode(node)

        revision = self.revision(node)

        if not revision.startswith(b'\1\n'):
            return revision

        start = revision.index(b'\1\n', 2)
        return revision[start + 2:]

    def renamed(self, node):
        # Return (copysource, copyrev-node) if this revision records a copy,
        # else False.
        validatenode(node)

        if self.parents(node)[0] != nullid:
            return False

        fulltext = self.revision(node)
        m = filelog.parsemeta(fulltext)[0]

        if m and 'copy' in m:
            return m['copy'], bin(m['copyrev'])

        return False

    def cmp(self, node, text):
        """Return True if ``text`` differs from the stored revision."""
        validatenode(node)

        t = text

        if text.startswith(b'\1\n'):
            # Escape a leading metadata marker the same way add() would.
            t = b'\1\n\1\n' + text

        p1, p2 = self.parents(node)

        if revlog.hash(t, p1, p2) == node:
            return False

        # Censored files compare against the empty file.
        if self.iscensored(self.rev(node)):
            return text != b''

        # Renames change the hash without changing data; compare slowly.
        if self.renamed(node):
            t2 = self.read(node)
            return t2 != text

        return True

    def size(self, rev):
        """Return the size of the file data for ``rev``."""
        validaterev(rev)

        node = self._indexbyrev[rev][b'node']

        # Renamed revisions require reading and stripping metadata (slow).
        if self.renamed(node):
            return len(self.read(node))

        if self.iscensored(rev):
            return 0

        return len(self.revision(node))

    def iscensored(self, rev):
        """Check if a file revision is censored."""
        validaterev(rev)

        return self.flags(rev) & revlog.REVIDX_ISCENSORED

    def commonancestorsheads(self, a, b):
        """Return the nodes of the heads of common ancestors of a and b."""
        validatenode(a)
        validatenode(b)

        a = self.rev(a)
        b = self.rev(b)

        ancestors = ancestor.commonancestorsheads(self.parentrevs, a, b)
        return pycompat.maplist(self.node, ancestors)

    def descendants(self, revs):
        # This is a copy of revlog.descendants()
        first = min(revs)
        if first == nullrev:
            for i in self:
                yield i
            return

        seen = set(revs)
        for i in self.revs(start=first + 1):
            for x in self.parentrevs(i):
                if x != nullrev and x in seen:
                    seen.add(i)
                    yield i
                    break

    # Required by verify.
    def files(self):
        entries = self._svfs.listdir(self._storepath)

        # Strip out undo.backup.* files created as part of transaction
        # recording.
        entries = [f for f in entries if not f.startswith('undo.backup.')]

        return [b'/'.join((self._storepath, f)) for f in entries]

    # Required by verify.
    def checksize(self):
        return 0, 0

    def add(self, text, meta, transaction, linkrev, p1, p2):
        """Add a revision, wrapping ``text`` in metadata when needed."""
        if meta or text.startswith(b'\1\n'):
            text = filelog.packmeta(meta, text)

        return self.addrevision(text, transaction, linkrev, p1, p2)

    def addrevision(self, text, transaction, linkrev, p1, p2, node=None,
                    flags=revlog.REVIDX_DEFAULT_FLAGS, cachedelta=None):
        """Add a revision fulltext, returning its node."""
        validatenode(p1)
        validatenode(p2)

        if flags:
            node = node or revlog.hash(text, p1, p2)

        rawtext, validatehash = self._processflags(text, flags, 'write')

        node = node or revlog.hash(text, p1, p2)

        # Adding an already-known node is a no-op.
        if node in self._indexbynode:
            return node

        if validatehash:
            self.checkhash(rawtext, node, p1=p1, p2=p2)

        return self._addrawrevision(node, rawtext, transaction, linkrev, p1, p2,
                                    flags)

    def _addrawrevision(self, node, rawtext, transaction, link, p1, p2, flags):
        # Write the fulltext and record the index entry; the index file is
        # registered with the transaction for rollback.
        transaction.addbackup(self._indexpath)

        path = b'/'.join([self._storepath, hex(node)])

        self._svfs.write(path, rawtext)

        self._indexdata.append({
            b'node': node,
            b'p1': p1,
            b'p2': p2,
            b'linkrev': link,
            b'flags': flags,
        })

        self._reflectindexupdate()

        return node

    def _reflectindexupdate(self):
        # Rebuild in-memory structures and persist the index as CBOR.
        self._refreshindex()
        self._svfs.write(self._indexpath, cbor.dumps(self._indexdata))

    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
        """Apply a group of deltas, returning the list of added nodes."""
        nodes = []

        transaction.addbackup(self._indexpath)

        for node, p1, p2, linknode, deltabase, delta, flags in deltas:
            linkrev = linkmapper(linknode)
            flags = flags or revlog.REVIDX_DEFAULT_FLAGS

            nodes.append(node)

            if node in self._indexbynode:
                continue

            # Need to resolve the fulltext from the delta base.
            if deltabase == nullid:
                text = mdiff.patch(b'', delta)
            else:
                text = mdiff.patch(self.revision(deltabase), delta)

            self._addrawrevision(node, text, transaction, linkrev, p1, p2,
                                 flags)

            if addrevisioncb:
                addrevisioncb(self, node)

        return nodes

    def revdiff(self, rev1, rev2):
        """Return a delta between the raw texts of two revisions."""
        validaterev(rev1)
        validaterev(rev2)

        node1 = self.node(rev1)
        node2 = self.node(rev2)

        return mdiff.textdiff(self.revision(node1, raw=True),
                              self.revision(node2, raw=True))

    def headrevs(self):
        # Assume all revisions are heads by default.
        revishead = {rev: True for rev in self._indexbyrev}

        for rev, entry in self._indexbyrev.items():
            # Unset head flag for all seen parents.
            revishead[self.rev(entry[b'p1'])] = False
            revishead[self.rev(entry[b'p2'])] = False

        return [rev for rev, ishead in sorted(revishead.items())
                if ishead]

    def heads(self, start=None, stop=None):
        # This is copied from revlog.py.
        if start is None and stop is None:
            if not len(self):
                return [nullid]
            return [self.node(r) for r in self.headrevs()]

        if start is None:
            start = nullid
        if stop is None:
            stop = []
        stoprevs = set([self.rev(n) for n in stop])
        startrev = self.rev(start)
        reachable = {startrev}
        heads = {startrev}

        parentrevs = self.parentrevs
        for r in self.revs(start=startrev + 1):
            for p in parentrevs(r):
                if p in reachable:
                    if r not in stoprevs:
                        reachable.add(r)
                        heads.add(r)
                    if p in heads and p not in stoprevs:
                        heads.remove(p)

        return [self.node(r) for r in heads]

    def children(self, node):
        """Return the nodes of revisions having ``node`` as a parent."""
        validatenode(node)

        # This is a copy of revlog.children().
        c = []
        p = self.rev(node)
        for r in self.revs(start=p + 1):
            prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
            if prevs:
                for pr in prevs:
                    if pr == p:
                        c.append(self.node(r))
            elif p == nullrev:
                c.append(self.node(r))
        return c

    def getstrippoint(self, minlink):
        """Return (strippoint, brokenrevs) for stripping at ``minlink``."""

        # This is largely a copy of revlog.getstrippoint().
        brokenrevs = set()
        strippoint = len(self)

        heads = {}
        futurelargelinkrevs = set()
        for head in self.headrevs():
            headlinkrev = self.linkrev(head)
            heads[head] = headlinkrev
            if headlinkrev >= minlink:
                futurelargelinkrevs.add(headlinkrev)

        # This algorithm involves walking down the rev graph, starting at the
        # heads. Since the revs are topologically sorted according to linkrev,
        # once all head linkrevs are below the minlink, we know there are
        # no more revs that could have a linkrev greater than minlink.
        # So we can stop walking.
        while futurelargelinkrevs:
            strippoint -= 1
            linkrev = heads.pop(strippoint)

            if linkrev < minlink:
                brokenrevs.add(strippoint)
            else:
                futurelargelinkrevs.remove(linkrev)

            for p in self.parentrevs(strippoint):
                if p != nullrev:
                    plinkrev = self.linkrev(p)
                    heads[p] = plinkrev
                    if plinkrev >= minlink:
                        futurelargelinkrevs.add(plinkrev)

        return strippoint, brokenrevs

    def strip(self, minlink, transaction):
        """Remove revisions whose linkrev is >= ``minlink``."""
        if not len(self):
            return

        rev, _ignored = self.getstrippoint(minlink)
        if rev == len(self):
            return

        # Purge index data starting at the requested revision.
        self._indexdata[rev:] = []
        self._reflectindexupdate()
603 608
def issimplestorefile(f, kind, st):
    """Return True if store file ``f`` belongs to the simple store."""
    # Only regular files can belong to the simple store.
    if kind != stat.S_IFREG:
        return False

    # Revlog files are owned by the standard store.
    if store.isrevlog(f, kind, st):
        return False

    # Ignore transaction undo files.
    if f.startswith('undo.'):
        return False

    # Otherwise assume it belongs to the simple store.
    return True
617 622
class simplestore(store.encodedstore):
    """encodedstore that also reports simple store data files."""

    def datafiles(self):
        # First emit whatever the encoded store knows about.
        for entry in super(simplestore, self).datafiles():
            yield entry

        # Supplement with non-revlog files.
        walked = self._walk('data', True, filefilter=issimplestorefile)

        for unencoded, encoded, size in walked:
            try:
                unencoded = store.decodefilename(unencoded)
            except KeyError:
                unencoded = None

            yield unencoded, encoded, size
633 638
def reposetup(ui, repo):
    """Teach a local repo to hand out simple store file storage."""
    if not repo.local():
        return

    if isinstance(repo, bundlerepo.bundlerepository):
        raise error.Abort(_('cannot use simple store with bundlerepo'))

    class simplestorerepo(repo.__class__):
        def file(self, f):
            # Swap in our storage implementation for tracked files.
            return filestorage(self.svfs, f)

    repo.__class__ = simplestorerepo
646 651
def featuresetup(ui, supported):
    """Advertise the simple store requirement as supported."""
    supported.add(REQUIREMENT)
649 654
def newreporequirements(orig, repo):
    """Modifies default requirements for new repos to use the simple store."""
    reqs = orig(repo)

    # These requirements are only used to affect creation of the store
    # object. We have our own store. So we can remove them.
    # TODO do this once we feel like taking the test hit.
    #if 'fncache' in requirements:
    #    requirements.remove('fncache')
    #if 'dotencode' in requirements:
    #    requirements.remove('dotencode')

    reqs.add(REQUIREMENT)

    return reqs
665 670
def makestore(orig, requirements, path, vfstype):
    """Build a simplestore for repos carrying our requirement."""
    if REQUIREMENT in requirements:
        return simplestore(path, vfstype)

    return orig(requirements, path, vfstype)
671 676
def verifierinit(orig, self, *args, **kwargs):
    """Wrap verifier.__init__ to silence orphan store file warnings."""
    orig(self, *args, **kwargs)

    # We don't care that files in the store don't align with what is
    # advertised. So suppress these warnings.
    self.warnorphanstorefiles = False
678 683
def extsetup(ui):
    """Install the wrappers that activate the simple store."""
    localrepo.featuresetupfuncs.add(featuresetup)

    extensions.wrapfunction(
        localrepo, 'newreporequirements', newreporequirements)
    extensions.wrapfunction(store, 'store', makestore)
    extensions.wrapfunction(verify.verifier, '__init__', verifierinit)
@@ -1,135 +1,146 b''
1 1 # Test that certain objects conform to well-defined interfaces.
2 2
3 3 from __future__ import absolute_import, print_function
4 4
5 5 import os
6 6
7 7 from mercurial.thirdparty.zope import (
8 8 interface as zi,
9 9 )
10 10 from mercurial.thirdparty.zope.interface import (
11 11 verify as ziverify,
12 12 )
13 13 from mercurial import (
14 14 bundlerepo,
15 filelog,
15 16 httppeer,
16 17 localrepo,
17 18 repository,
18 19 sshpeer,
19 20 statichttprepo,
20 21 ui as uimod,
21 22 unionrepo,
23 vfs as vfsmod,
22 24 wireprotoserver,
23 25 wireprototypes,
24 26 )
25 27
# Repository root: the parent of the tests/ directory containing this script.
rootdir = os.path.normpath(os.path.join(os.path.dirname(__file__), '..'))
27 29
def checkzobject(o, allowextra=False):
    """Verify an object with a zope interface."""
    ifaces = zi.providedBy(o)
    if not ifaces:
        print('%r does not provide any zope interfaces' % o)
        return

    # Run zope.interface's built-in verification routine. This verifies that
    # everything that is supposed to be present is present.
    for iface in ifaces:
        ziverify.verifyObject(iface, o)

    if allowextra:
        return

    # Now verify that the object provides no extra public attributes that
    # aren't declared as part of interfaces.
    declared = set()
    for iface in ifaces:
        declared |= set(iface.names(all=True))

    public = {name for name in dir(o) if not name.startswith('_')}

    for attr in sorted(public - declared):
        print('public attribute not declared in interfaces: %s.%s' % (
            o.__class__.__name__, attr))
51 56
# Facilitates testing localpeer.
class dummyrepo(object):
    """Minimal repo stand-in sufficient to construct a localpeer."""

    def __init__(self):
        self.ui = uimod.ui()

    def filtered(self, name):
        pass

    def _restrictcapabilities(self, caps):
        pass
60 65
class dummyopener(object):
    """URL-opener stand-in exposing an empty handler list."""

    handlers = []
63 68
# Facilitates testing sshpeer without requiring an SSH server.
class badpeer(httppeer.httppeer):
    """httppeer carrying public attributes not declared by any interface."""

    def __init__(self):
        super(badpeer, self).__init__(None, None, None, dummyopener())
        self.badattribute = True

    def badmethod(self):
        pass
72 77
class dummypipe(object):
    """Pipe stand-in whose close() is a no-op."""

    def close(self):
        pass
76 81
def main():
    """Verify Mercurial's peer, repo and storage classes against their
    declared zope interfaces."""
    ui = uimod.ui()
    # Needed so we can open a local repo with obsstore without a warning.
    ui.setconfig('experimental', 'evolution.createmarkers', True)

    # badpeer deliberately declares extra attributes; checkzobject should
    # report them.
    checkzobject(badpeer())

    ziverify.verifyClass(repository.ipeerbaselegacycommands,
                         httppeer.httppeer)
    checkzobject(httppeer.httppeer(None, None, None, dummyopener()))

    ziverify.verifyClass(repository.ipeerbase,
                         localrepo.localpeer)
    checkzobject(localrepo.localpeer(dummyrepo()))

    ziverify.verifyClass(repository.ipeerbaselegacycommands,
                         sshpeer.sshv1peer)
    checkzobject(sshpeer.sshv1peer(ui, 'ssh://localhost/foo', None, dummypipe(),
                                   dummypipe(), None, None))

    ziverify.verifyClass(repository.ipeerbaselegacycommands,
                         sshpeer.sshv2peer)
    checkzobject(sshpeer.sshv2peer(ui, 'ssh://localhost/foo', None, dummypipe(),
                                   dummypipe(), None, None))

    ziverify.verifyClass(repository.ipeerbase, bundlerepo.bundlepeer)
    checkzobject(bundlerepo.bundlepeer(dummyrepo()))

    ziverify.verifyClass(repository.ipeerbase, statichttprepo.statichttppeer)
    checkzobject(statichttprepo.statichttppeer(dummyrepo()))

    ziverify.verifyClass(repository.ipeerbase, unionrepo.unionpeer)
    checkzobject(unionrepo.unionpeer(dummyrepo()))

    ziverify.verifyClass(repository.completelocalrepository,
                         localrepo.localrepository)
    repo = localrepo.localrepository(ui, rootdir)
    checkzobject(repo)

    ziverify.verifyClass(wireprototypes.baseprotocolhandler,
                         wireprotoserver.sshv1protocolhandler)
    ziverify.verifyClass(wireprototypes.baseprotocolhandler,
                         wireprotoserver.sshv2protocolhandler)
    ziverify.verifyClass(wireprototypes.baseprotocolhandler,
                         wireprotoserver.httpv1protocolhandler)
    ziverify.verifyClass(wireprototypes.baseprotocolhandler,
                         wireprotoserver.httpv2protocolhandler)

    sshv1 = wireprotoserver.sshv1protocolhandler(None, None, None)
    checkzobject(sshv1)
    sshv2 = wireprotoserver.sshv2protocolhandler(None, None, None)
    checkzobject(sshv2)

    httpv1 = wireprotoserver.httpv1protocolhandler(None, None, None)
    checkzobject(httpv1)
    httpv2 = wireprotoserver.httpv2protocolhandler(None, None)
    checkzobject(httpv2)

    ziverify.verifyClass(repository.ifilestorage, filelog.filelog)

    # filelog instances expose public attributes beyond the interface
    # (revlog internals), so allow extras when checking the instance.
    vfs = vfsmod.vfs('.')
    fl = filelog.filelog(vfs, 'dummy.i')
    checkzobject(fl, allowextra=True)
# Only run the checks when executed as a script; importing this module
# (e.g. for tooling) stays side-effect free.
if __name__ == '__main__':
    main()
General Comments 0
You need to be logged in to leave comments. Login now