filelog: declare that filelog implements a storage interface...
Gregory Szorc
r37459:a3202fa8 default
@@ -1,139 +1,144 @@
 # filelog.py - file history class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 import re
 import struct

+from .thirdparty.zope import (
+    interface as zi,
+)
 from . import (
     error,
     mdiff,
+    repository,
     revlog,
 )

 _mdre = re.compile('\1\n')
 def parsemeta(text):
     """return (metadatadict, metadatasize)"""
     # text can be buffer, so we can't use .startswith or .index
     if text[:2] != '\1\n':
         return None, None
     s = _mdre.search(text, 2).start()
     mtext = text[2:s]
     meta = {}
     for l in mtext.splitlines():
         k, v = l.split(": ", 1)
         meta[k] = v
     return meta, (s + 2)

 def packmeta(meta, text):
     keys = sorted(meta)
     metatext = "".join("%s: %s\n" % (k, meta[k]) for k in keys)
     return "\1\n%s\1\n%s" % (metatext, text)

 def _censoredtext(text):
     m, offs = parsemeta(text)
     return m and "censored" in m

+@zi.implementer(repository.ifilestorage)
 class filelog(revlog.revlog):
     def __init__(self, opener, path):
         super(filelog, self).__init__(opener,
                                       "/".join(("data", path + ".i")))
         # full name of the user visible file, relative to the repository root
         self.filename = path

     def read(self, node):
         t = self.revision(node)
         if not t.startswith('\1\n'):
             return t
         s = t.index('\1\n', 2)
         return t[s + 2:]

     def add(self, text, meta, transaction, link, p1=None, p2=None):
         if meta or text.startswith('\1\n'):
             text = packmeta(meta, text)
         return self.addrevision(text, transaction, link, p1, p2)

     def renamed(self, node):
         if self.parents(node)[0] != revlog.nullid:
             return False
         t = self.revision(node)
         m = parsemeta(t)[0]
         if m and "copy" in m:
             return (m["copy"], revlog.bin(m["copyrev"]))
         return False

     def size(self, rev):
         """return the size of a given revision"""

         # for revisions with renames, we have to go the slow way
         node = self.node(rev)
         if self.renamed(node):
             return len(self.read(node))
         if self.iscensored(rev):
             return 0

         # XXX if self.read(node).startswith("\1\n"), this returns (size+4)
         return super(filelog, self).size(rev)

     def cmp(self, node, text):
         """compare text with a given file revision

         returns True if text is different than what is stored.
         """

         t = text
         if text.startswith('\1\n'):
             t = '\1\n\1\n' + text

         samehashes = not super(filelog, self).cmp(node, t)
         if samehashes:
             return False

         # censored files compare against the empty file
         if self.iscensored(self.rev(node)):
             return text != ''

         # renaming a file produces a different hash, even if the data
         # remains unchanged. Check if it's the case (slow):
         if self.renamed(node):
             t2 = self.read(node)
             return t2 != text

         return True

     def checkhash(self, text, node, p1=None, p2=None, rev=None):
         try:
             super(filelog, self).checkhash(text, node, p1=p1, p2=p2, rev=rev)
         except error.RevlogError:
             if _censoredtext(text):
                 raise error.CensoredNodeError(self.indexfile, node, text)
             raise

     def iscensored(self, rev):
         """Check if a file revision is censored."""
         return self.flags(rev) & revlog.REVIDX_ISCENSORED

     def _peek_iscensored(self, baserev, delta, flush):
         """Quickly check if a delta produces a censored revision."""
         # Fragile heuristic: unless new file meta keys are added alphabetically
         # preceding "censored", all censored revisions are prefixed by
         # "\1\ncensored:". A delta producing such a censored revision must be a
         # full-replacement delta, so we inspect the first and only patch in the
         # delta for this prefix.
         hlen = struct.calcsize(">lll")
         if len(delta) <= hlen:
             return False

         oldlen = self.rawsize(baserev)
         newlen = len(delta) - hlen
         if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
             return False

         add = "\1\ncensored:"
         addlen = len(add)
         return newlen >= addlen and delta[hlen:hlen + addlen] == add
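
For reference, a minimal sketch (not part of the changeset) of the '\1\n' metadata header that packmeta()/parsemeta() above handle: keys are emitted in sorted order, and the file text follows the second '\1\n' marker. It assumes a Python 2-era environment where the mercurial package is importable; the filename and node values are made up for illustration.

    from mercurial import filelog

    meta = {'copy': 'old-name.txt', 'copyrev': '0' * 40}  # hypothetical values
    packed = filelog.packmeta(meta, 'file contents\n')
    # packed == '\x01\ncopy: old-name.txt\ncopyrev: 000...000\n\x01\nfile contents\n'

    parsed, offset = filelog.parsemeta(packed)
    assert parsed == meta                     # metadata round-trips
    assert packed[offset:] == 'file contents\n'  # offset points past the header
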
@@ -1,685 +1,690 @@
 # simplestorerepo.py - Extension that swaps in alternate repository storage.
 #
 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 # To use this with the test suite:
 #
 #   $ HGREPOFEATURES="simplestore" ./run-tests.py \
 #       --extra-config-opt extensions.simplestore=`pwd`/simplestorerepo.py

 from __future__ import absolute_import

 import stat

 from mercurial.i18n import _
 from mercurial.node import (
     bin,
     hex,
     nullid,
     nullrev,
 )
 from mercurial.thirdparty import (
     cbor,
 )
+from mercurial.thirdparty.zope import (
+    interface as zi,
+)
 from mercurial import (
     ancestor,
     bundlerepo,
     error,
     extensions,
     filelog,
     localrepo,
     mdiff,
     pycompat,
+    repository,
     revlog,
     store,
     verify,
 )

 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
 testedwith = 'ships-with-hg-core'

 REQUIREMENT = 'testonly-simplestore'

 def validatenode(node):
     if isinstance(node, int):
         raise ValueError('expected node; got int')

     if len(node) != 20:
         raise ValueError('expected 20 byte node')

 def validaterev(rev):
     if not isinstance(rev, int):
         raise ValueError('expected int')

+@zi.implementer(repository.ifilestorage)
 class filestorage(object):
     """Implements storage for a tracked path.

     Data is stored in the VFS in a directory corresponding to the tracked
     path.

     Index data is stored in an ``index`` file using CBOR.

     Fulltext data is stored in files having names of the node.
     """

     def __init__(self, svfs, path):
         self._svfs = svfs
         self._path = path

         self._storepath = b'/'.join([b'data', path])
         self._indexpath = b'/'.join([self._storepath, b'index'])

         indexdata = self._svfs.tryread(self._indexpath)
         if indexdata:
             indexdata = cbor.loads(indexdata)

         self._indexdata = indexdata or []
         self._indexbynode = {}
         self._indexbyrev = {}
         self.index = []
         self._refreshindex()

         # This is used by changegroup code :/
         self._generaldelta = True
         self.storedeltachains = False

         self.version = 1

     def _refreshindex(self):
         self._indexbynode.clear()
         self._indexbyrev.clear()
         self.index = []

         for i, entry in enumerate(self._indexdata):
             self._indexbynode[entry[b'node']] = entry
             self._indexbyrev[i] = entry

         self._indexbynode[nullid] = {
             b'node': nullid,
             b'p1': nullid,
             b'p2': nullid,
             b'linkrev': nullrev,
             b'flags': 0,
         }

         self._indexbyrev[nullrev] = {
             b'node': nullid,
             b'p1': nullid,
             b'p2': nullid,
             b'linkrev': nullrev,
             b'flags': 0,
         }

         for i, entry in enumerate(self._indexdata):
             p1rev, p2rev = self.parentrevs(self.rev(entry[b'node']))

             # start, length, rawsize, chainbase, linkrev, p1, p2, node
             self.index.append((0, 0, 0, -1, entry[b'linkrev'], p1rev, p2rev,
                                entry[b'node']))

         self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))

     def __len__(self):
         return len(self._indexdata)

     def __iter__(self):
         return iter(range(len(self)))

     def revs(self, start=0, stop=None):
         step = 1
         if stop is not None:
             if start > stop:
                 step = -1

             stop += step
         else:
             stop = len(self)

         return range(start, stop, step)

     def parents(self, node):
         validatenode(node)

         if node not in self._indexbynode:
             raise KeyError('unknown node')

         entry = self._indexbynode[node]

         return entry[b'p1'], entry[b'p2']

     def parentrevs(self, rev):
         p1, p2 = self.parents(self._indexbyrev[rev][b'node'])
         return self.rev(p1), self.rev(p2)

     def rev(self, node):
         validatenode(node)

         try:
             self._indexbynode[node]
         except KeyError:
             raise error.LookupError(node, self._indexpath, _('no node'))

         for rev, entry in self._indexbyrev.items():
             if entry[b'node'] == node:
                 return rev

         raise error.ProgrammingError('this should not occur')

     def node(self, rev):
         validaterev(rev)

         return self._indexbyrev[rev][b'node']

     def lookup(self, node):
         if isinstance(node, int):
             return self.node(node)

         if len(node) == 20:
             self.rev(node)
             return node

         try:
             rev = int(node)
             if '%d' % rev != node:
                 raise ValueError

             if rev < 0:
                 rev = len(self) + rev
             if rev < 0 or rev >= len(self):
                 raise ValueError

             return self.node(rev)
         except (ValueError, OverflowError):
             pass

         if len(node) == 40:
             try:
                 rawnode = bin(node)
                 self.rev(rawnode)
                 return rawnode
             except TypeError:
                 pass

         raise error.LookupError(node, self._path, _('invalid lookup input'))

     def linkrev(self, rev):
         validaterev(rev)

         return self._indexbyrev[rev][b'linkrev']

     def flags(self, rev):
         validaterev(rev)

         return self._indexbyrev[rev][b'flags']

     def deltaparent(self, rev):
         validaterev(rev)

         p1node = self.parents(self.node(rev))[0]
         return self.rev(p1node)

     def candelta(self, baserev, rev):
         validaterev(baserev)
         validaterev(rev)

         if ((self.flags(baserev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)
             or (self.flags(rev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)):
             return False

         return True

     def rawsize(self, rev):
         validaterev(rev)
         node = self.node(rev)
         return len(self.revision(node, raw=True))

     def _processflags(self, text, flags, operation, raw=False):
         if flags == 0:
             return text, True

         if flags & ~revlog.REVIDX_KNOWN_FLAGS:
             raise error.RevlogError(_("incompatible revision flag '%#x'") %
                                     (flags & ~revlog.REVIDX_KNOWN_FLAGS))

         validatehash = True
         # Depending on the operation (read or write), the order might be
         # reversed due to non-commutative transforms.
         orderedflags = revlog.REVIDX_FLAGS_ORDER
         if operation == 'write':
             orderedflags = reversed(orderedflags)

         for flag in orderedflags:
             # If a flagprocessor has been registered for a known flag, apply the
             # related operation transform and update result tuple.
             if flag & flags:
                 vhash = True

                 if flag not in revlog._flagprocessors:
                     message = _("missing processor for flag '%#x'") % (flag)
                     raise revlog.RevlogError(message)

                 processor = revlog._flagprocessors[flag]
                 if processor is not None:
                     readtransform, writetransform, rawtransform = processor

                     if raw:
                         vhash = rawtransform(self, text)
                     elif operation == 'read':
                         text, vhash = readtransform(self, text)
                     else: # write operation
                         text, vhash = writetransform(self, text)
                 validatehash = validatehash and vhash

         return text, validatehash

     def checkhash(self, text, node, p1=None, p2=None, rev=None):
         if p1 is None and p2 is None:
             p1, p2 = self.parents(node)
         if node != revlog.hash(text, p1, p2):
             raise error.RevlogError(_("integrity check failed on %s") %
                                     self._path)

     def revision(self, node, raw=False):
         validatenode(node)

         if node == nullid:
             return b''

         rev = self.rev(node)
         flags = self.flags(rev)

         path = b'/'.join([self._storepath, hex(node)])
         rawtext = self._svfs.read(path)

         text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
         if validatehash:
             self.checkhash(text, node, rev=rev)

         return text

     def read(self, node):
         validatenode(node)

         revision = self.revision(node)

         if not revision.startswith(b'\1\n'):
             return revision

         start = revision.index(b'\1\n', 2)
         return revision[start + 2:]

     def renamed(self, node):
         validatenode(node)

         if self.parents(node)[0] != nullid:
             return False

         fulltext = self.revision(node)
         m = filelog.parsemeta(fulltext)[0]

         if m and 'copy' in m:
             return m['copy'], bin(m['copyrev'])

         return False

     def cmp(self, node, text):
         validatenode(node)

         t = text

         if text.startswith(b'\1\n'):
             t = b'\1\n\1\n' + text

         p1, p2 = self.parents(node)

         if revlog.hash(t, p1, p2) == node:
             return False

         if self.iscensored(self.rev(node)):
             return text != b''

         if self.renamed(node):
             t2 = self.read(node)
             return t2 != text

         return True

     def size(self, rev):
         validaterev(rev)

         node = self._indexbyrev[rev][b'node']

         if self.renamed(node):
             return len(self.read(node))

         if self.iscensored(rev):
             return 0

         return len(self.revision(node))

     def iscensored(self, rev):
         validaterev(rev)

         return self.flags(rev) & revlog.REVIDX_ISCENSORED

     def commonancestorsheads(self, a, b):
         validatenode(a)
         validatenode(b)

         a = self.rev(a)
         b = self.rev(b)

         ancestors = ancestor.commonancestorsheads(self.parentrevs, a, b)
         return pycompat.maplist(self.node, ancestors)

     def descendants(self, revs):
         # This is a copy of revlog.descendants()
         first = min(revs)
         if first == nullrev:
             for i in self:
                 yield i
             return

         seen = set(revs)
         for i in self.revs(start=first + 1):
             for x in self.parentrevs(i):
                 if x != nullrev and x in seen:
                     seen.add(i)
                     yield i
                     break

     # Required by verify.
     def files(self):
         entries = self._svfs.listdir(self._storepath)

         # Strip out undo.backup.* files created as part of transaction
         # recording.
         entries = [f for f in entries if not f.startswith('undo.backup.')]

         return [b'/'.join((self._storepath, f)) for f in entries]

     # Required by verify.
     def checksize(self):
         return 0, 0

     def add(self, text, meta, transaction, linkrev, p1, p2):
         if meta or text.startswith(b'\1\n'):
             text = filelog.packmeta(meta, text)

         return self.addrevision(text, transaction, linkrev, p1, p2)

     def addrevision(self, text, transaction, linkrev, p1, p2, node=None,
                     flags=revlog.REVIDX_DEFAULT_FLAGS, cachedelta=None):
         validatenode(p1)
         validatenode(p2)

         if flags:
             node = node or revlog.hash(text, p1, p2)

         rawtext, validatehash = self._processflags(text, flags, 'write')

         node = node or revlog.hash(text, p1, p2)

         if node in self._indexbynode:
             return node

         if validatehash:
             self.checkhash(rawtext, node, p1=p1, p2=p2)

         return self._addrawrevision(node, rawtext, transaction, linkrev, p1, p2,
                                     flags)

     def _addrawrevision(self, node, rawtext, transaction, link, p1, p2, flags):
         transaction.addbackup(self._indexpath)

         path = b'/'.join([self._storepath, hex(node)])

         self._svfs.write(path, rawtext)

         self._indexdata.append({
             b'node': node,
             b'p1': p1,
             b'p2': p2,
             b'linkrev': link,
             b'flags': flags,
         })

         self._reflectindexupdate()

         return node

     def _reflectindexupdate(self):
         self._refreshindex()
         self._svfs.write(self._indexpath, cbor.dumps(self._indexdata))

     def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
         nodes = []

         transaction.addbackup(self._indexpath)

         for node, p1, p2, linknode, deltabase, delta, flags in deltas:
             linkrev = linkmapper(linknode)
             flags = flags or revlog.REVIDX_DEFAULT_FLAGS

             nodes.append(node)

             if node in self._indexbynode:
                 continue

             # Need to resolve the fulltext from the delta base.
             if deltabase == nullid:
                 text = mdiff.patch(b'', delta)
             else:
                 text = mdiff.patch(self.revision(deltabase), delta)

             self._addrawrevision(node, text, transaction, linkrev, p1, p2,
                                  flags)

             if addrevisioncb:
                 addrevisioncb(self, node)

         return nodes

     def revdiff(self, rev1, rev2):
         validaterev(rev1)
         validaterev(rev2)

         node1 = self.node(rev1)
         node2 = self.node(rev2)

         return mdiff.textdiff(self.revision(node1, raw=True),
                               self.revision(node2, raw=True))

     def headrevs(self):
         # Assume all revisions are heads by default.
         revishead = {rev: True for rev in self._indexbyrev}

         for rev, entry in self._indexbyrev.items():
             # Unset head flag for all seen parents.
             revishead[self.rev(entry[b'p1'])] = False
             revishead[self.rev(entry[b'p2'])] = False

         return [rev for rev, ishead in sorted(revishead.items())
                 if ishead]

     def heads(self, start=None, stop=None):
         # This is copied from revlog.py.
         if start is None and stop is None:
             if not len(self):
                 return [nullid]
             return [self.node(r) for r in self.headrevs()]

         if start is None:
             start = nullid
         if stop is None:
             stop = []
         stoprevs = set([self.rev(n) for n in stop])
         startrev = self.rev(start)
         reachable = {startrev}
         heads = {startrev}

         parentrevs = self.parentrevs
         for r in self.revs(start=startrev + 1):
             for p in parentrevs(r):
                 if p in reachable:
                     if r not in stoprevs:
                         reachable.add(r)
                         heads.add(r)
                 if p in heads and p not in stoprevs:
                     heads.remove(p)

         return [self.node(r) for r in heads]

     def children(self, node):
         validatenode(node)

         # This is a copy of revlog.children().
         c = []
         p = self.rev(node)
         for r in self.revs(start=p + 1):
             prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
             if prevs:
                 for pr in prevs:
                     if pr == p:
                         c.append(self.node(r))
             elif p == nullrev:
                 c.append(self.node(r))
         return c

     def getstrippoint(self, minlink):

         # This is largely a copy of revlog.getstrippoint().
         brokenrevs = set()
         strippoint = len(self)

         heads = {}
         futurelargelinkrevs = set()
         for head in self.headrevs():
             headlinkrev = self.linkrev(head)
             heads[head] = headlinkrev
             if headlinkrev >= minlink:
                 futurelargelinkrevs.add(headlinkrev)

         # This algorithm involves walking down the rev graph, starting at the
         # heads. Since the revs are topologically sorted according to linkrev,
         # once all head linkrevs are below the minlink, we know there are
         # no more revs that could have a linkrev greater than minlink.
         # So we can stop walking.
         while futurelargelinkrevs:
             strippoint -= 1
             linkrev = heads.pop(strippoint)

             if linkrev < minlink:
                 brokenrevs.add(strippoint)
             else:
                 futurelargelinkrevs.remove(linkrev)

             for p in self.parentrevs(strippoint):
                 if p != nullrev:
                     plinkrev = self.linkrev(p)
                     heads[p] = plinkrev
                     if plinkrev >= minlink:
                         futurelargelinkrevs.add(plinkrev)

         return strippoint, brokenrevs

     def strip(self, minlink, transaction):
         if not len(self):
             return

         rev, _ignored = self.getstrippoint(minlink)
         if rev == len(self):
             return

         # Purge index data starting at the requested revision.
         self._indexdata[rev:] = []
         self._reflectindexupdate()

 def issimplestorefile(f, kind, st):
     if kind != stat.S_IFREG:
         return False

     if store.isrevlog(f, kind, st):
         return False

     # Ignore transaction undo files.
     if f.startswith('undo.'):
         return False

     # Otherwise assume it belongs to the simple store.
     return True

 class simplestore(store.encodedstore):
     def datafiles(self):
         for x in super(simplestore, self).datafiles():
             yield x

         # Supplement with non-revlog files.
         extrafiles = self._walk('data', True, filefilter=issimplestorefile)

         for unencoded, encoded, size in extrafiles:
             try:
                 unencoded = store.decodefilename(unencoded)
             except KeyError:
                 unencoded = None

             yield unencoded, encoded, size

 def reposetup(ui, repo):
     if not repo.local():
         return

     if isinstance(repo, bundlerepo.bundlerepository):
         raise error.Abort(_('cannot use simple store with bundlerepo'))

     class simplestorerepo(repo.__class__):
         def file(self, f):
             return filestorage(self.svfs, f)

     repo.__class__ = simplestorerepo

 def featuresetup(ui, supported):
     supported.add(REQUIREMENT)

 def newreporequirements(orig, repo):
     """Modifies default requirements for new repos to use the simple store."""
     requirements = orig(repo)

     # These requirements are only used to affect creation of the store
     # object. We have our own store. So we can remove them.
     # TODO do this once we feel like taking the test hit.
     #if 'fncache' in requirements:
     #    requirements.remove('fncache')
     #if 'dotencode' in requirements:
     #    requirements.remove('dotencode')

     requirements.add(REQUIREMENT)

     return requirements

 def makestore(orig, requirements, path, vfstype):
     if REQUIREMENT not in requirements:
         return orig(requirements, path, vfstype)

     return simplestore(path, vfstype)

 def verifierinit(orig, self, *args, **kwargs):
     orig(self, *args, **kwargs)

     # We don't care that files in the store don't align with what is
     # advertised. So suppress these warnings.
     self.warnorphanstorefiles = False

 def extsetup(ui):
     localrepo.featuresetupfuncs.add(featuresetup)

     extensions.wrapfunction(localrepo, 'newreporequirements',
                             newreporequirements)
     extensions.wrapfunction(store, 'store', makestore)
     extensions.wrapfunction(verify.verifier, '__init__', verifierinit)
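
As context for the filestorage docstring above, here is a minimal sketch (not part of the changeset) of what the simple store persists for each tracked path: one CBOR-encoded 'index' file containing a list of dicts (mirroring what _addrawrevision() appends), plus one fulltext file per node named after the node's hex digest. It assumes the bundled mercurial.thirdparty.cbor module is importable; the node values are hypothetical.

    from mercurial.thirdparty import cbor

    # One entry per revision, in revision order, as written by
    # _reflectindexupdate() to b'data/<path>/index'.
    indexdata = [{
        b'node': b'\x11' * 20,   # hypothetical 20-byte binary node
        b'p1': b'\x00' * 20,     # nullid parent
        b'p2': b'\x00' * 20,
        b'linkrev': 0,
        b'flags': 0,
    }]

    blob = cbor.dumps(indexdata)
    assert cbor.loads(blob) == indexdata  # index round-trips through CBOR
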
@@ -1,135 +1,146 @@
 # Test that certain objects conform to well-defined interfaces.

 from __future__ import absolute_import, print_function

 import os

 from mercurial.thirdparty.zope import (
     interface as zi,
 )
 from mercurial.thirdparty.zope.interface import (
     verify as ziverify,
 )
 from mercurial import (
     bundlerepo,
+    filelog,
     httppeer,
     localrepo,
     repository,
     sshpeer,
     statichttprepo,
     ui as uimod,
     unionrepo,
+    vfs as vfsmod,
     wireprotoserver,
     wireprototypes,
 )

 rootdir = os.path.normpath(os.path.join(os.path.dirname(__file__), '..'))

-def checkzobject(o):
+def checkzobject(o, allowextra=False):
     """Verify an object with a zope interface."""
     ifaces = zi.providedBy(o)
     if not ifaces:
         print('%r does not provide any zope interfaces' % o)
         return

     # Run zope.interface's built-in verification routine. This verifies that
     # everything that is supposed to be present is present.
     for iface in ifaces:
         ziverify.verifyObject(iface, o)

+    if allowextra:
+        return
+
     # Now verify that the object provides no extra public attributes that
     # aren't declared as part of interfaces.
     allowed = set()
     for iface in ifaces:
         allowed |= set(iface.names(all=True))

     public = {a for a in dir(o) if not a.startswith('_')}

     for attr in sorted(public - allowed):
         print('public attribute not declared in interfaces: %s.%s' % (
             o.__class__.__name__, attr))

 # Facilitates testing localpeer.
 class dummyrepo(object):
     def __init__(self):
         self.ui = uimod.ui()
     def filtered(self, name):
         pass
     def _restrictcapabilities(self, caps):
         pass

 class dummyopener(object):
     handlers = []

 # Facilitates testing sshpeer without requiring an SSH server.
 class badpeer(httppeer.httppeer):
     def __init__(self):
         super(badpeer, self).__init__(None, None, None, dummyopener())
         self.badattribute = True

     def badmethod(self):
         pass

 class dummypipe(object):
     def close(self):
         pass

 def main():
     ui = uimod.ui()
     # Needed so we can open a local repo with obsstore without a warning.
     ui.setconfig('experimental', 'evolution.createmarkers', True)

     checkzobject(badpeer())

     ziverify.verifyClass(repository.ipeerbaselegacycommands,
                          httppeer.httppeer)
     checkzobject(httppeer.httppeer(None, None, None, dummyopener()))

     ziverify.verifyClass(repository.ipeerbase,
                          localrepo.localpeer)
     checkzobject(localrepo.localpeer(dummyrepo()))

     ziverify.verifyClass(repository.ipeerbaselegacycommands,
                          sshpeer.sshv1peer)
     checkzobject(sshpeer.sshv1peer(ui, 'ssh://localhost/foo', None, dummypipe(),
                                    dummypipe(), None, None))

     ziverify.verifyClass(repository.ipeerbaselegacycommands,
                          sshpeer.sshv2peer)
     checkzobject(sshpeer.sshv2peer(ui, 'ssh://localhost/foo', None, dummypipe(),
                                    dummypipe(), None, None))

     ziverify.verifyClass(repository.ipeerbase, bundlerepo.bundlepeer)
     checkzobject(bundlerepo.bundlepeer(dummyrepo()))

     ziverify.verifyClass(repository.ipeerbase, statichttprepo.statichttppeer)
     checkzobject(statichttprepo.statichttppeer(dummyrepo()))

     ziverify.verifyClass(repository.ipeerbase, unionrepo.unionpeer)
     checkzobject(unionrepo.unionpeer(dummyrepo()))

     ziverify.verifyClass(repository.completelocalrepository,
                          localrepo.localrepository)
     repo = localrepo.localrepository(ui, rootdir)
     checkzobject(repo)

     ziverify.verifyClass(wireprototypes.baseprotocolhandler,
                          wireprotoserver.sshv1protocolhandler)
     ziverify.verifyClass(wireprototypes.baseprotocolhandler,
                          wireprotoserver.sshv2protocolhandler)
     ziverify.verifyClass(wireprototypes.baseprotocolhandler,
                          wireprotoserver.httpv1protocolhandler)
     ziverify.verifyClass(wireprototypes.baseprotocolhandler,
                          wireprotoserver.httpv2protocolhandler)

     sshv1 = wireprotoserver.sshv1protocolhandler(None, None, None)
     checkzobject(sshv1)
     sshv2 = wireprotoserver.sshv2protocolhandler(None, None, None)
     checkzobject(sshv2)

     httpv1 = wireprotoserver.httpv1protocolhandler(None, None, None)
     checkzobject(httpv1)
     httpv2 = wireprotoserver.httpv2protocolhandler(None, None)
     checkzobject(httpv2)

+    ziverify.verifyClass(repository.ifilestorage, filelog.filelog)
+
+    vfs = vfsmod.vfs('.')
+    fl = filelog.filelog(vfs, 'dummy.i')
+    checkzobject(fl, allowextra=True)
+
 main()
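
A standalone sketch of the same verification the new test lines perform, usable outside the test harness; it assumes it runs from a Mercurial source checkout so the bundled zope.interface is importable, and the 'dummy.i' path is a placeholder copied from the test above. verifyClass checks that the class declares every interface member; providedBy confirms instances advertise the interface at runtime.

    from mercurial.thirdparty.zope.interface import verify as ziverify
    from mercurial import filelog, repository, vfs as vfsmod

    # The class declares the interface added by this changeset...
    ziverify.verifyClass(repository.ifilestorage, filelog.filelog)

    # ...and instances provide it, even though filelog (a revlog subclass)
    # exposes extra public attributes, which is why the test passes
    # allowextra=True to checkzobject().
    fl = filelog.filelog(vfsmod.vfs('.'), 'dummy.i')
    assert repository.ifilestorage.providedBy(fl)
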