simplestore: correctly implement flag processors...
Gregory Szorc
r37454:9d4f09bf default
tests/flagprocessorext.py
@@ -1,142 +1,136 @@
1 # coding=UTF-8
1 # coding=UTF-8
2
2
3 from __future__ import absolute_import
3 from __future__ import absolute_import
4
4
5 import base64
5 import base64
6 import zlib
6 import zlib
7
7
8 from mercurial import (
8 from mercurial import (
9 changegroup,
9 changegroup,
10 exchange,
10 exchange,
11 extensions,
11 extensions,
12 filelog,
13 revlog,
12 revlog,
14 util,
13 util,
15 )
14 )
16
15
17 # Test only: These flags are defined here only in the context of testing the
16 # Test only: These flags are defined here only in the context of testing the
18 # behavior of the flag processor. The canonical way to add flags is to get in
17 # behavior of the flag processor. The canonical way to add flags is to get in
19 # touch with the community and make them known in revlog.
18 # touch with the community and make them known in revlog.
20 REVIDX_NOOP = (1 << 3)
19 REVIDX_NOOP = (1 << 3)
21 REVIDX_BASE64 = (1 << 2)
20 REVIDX_BASE64 = (1 << 2)
22 REVIDX_GZIP = (1 << 1)
21 REVIDX_GZIP = (1 << 1)
23 REVIDX_FAIL = 1
22 REVIDX_FAIL = 1
24
23
25 def validatehash(self, text):
24 def validatehash(self, text):
26 return True
25 return True
27
26
28 def bypass(self, text):
27 def bypass(self, text):
29 return False
28 return False
30
29
31 def noopdonothing(self, text):
30 def noopdonothing(self, text):
32 return (text, True)
31 return (text, True)
33
32
34 def b64encode(self, text):
33 def b64encode(self, text):
35 return (base64.b64encode(text), False)
34 return (base64.b64encode(text), False)
36
35
37 def b64decode(self, text):
36 def b64decode(self, text):
38 return (base64.b64decode(text), True)
37 return (base64.b64decode(text), True)
39
38
40 def gzipcompress(self, text):
39 def gzipcompress(self, text):
41 return (zlib.compress(text), False)
40 return (zlib.compress(text), False)
42
41
43 def gzipdecompress(self, text):
42 def gzipdecompress(self, text):
44 return (zlib.decompress(text), True)
43 return (zlib.decompress(text), True)
45
44
46 def supportedoutgoingversions(orig, repo):
45 def supportedoutgoingversions(orig, repo):
47 versions = orig(repo)
46 versions = orig(repo)
48 versions.discard(b'01')
47 versions.discard(b'01')
49 versions.discard(b'02')
48 versions.discard(b'02')
50 versions.add(b'03')
49 versions.add(b'03')
51 return versions
50 return versions
52
51
53 def allsupportedversions(orig, ui):
52 def allsupportedversions(orig, ui):
54 versions = orig(ui)
53 versions = orig(ui)
55 versions.add(b'03')
54 versions.add(b'03')
56 return versions
55 return versions
57
56
58 def noopaddrevision(orig, self, text, transaction, link, p1, p2,
57 def makewrappedfile(obj):
58 class wrappedfile(obj.__class__):
59 def addrevision(self, text, transaction, link, p1, p2,
59 cachedelta=None, node=None,
60 cachedelta=None, node=None,
60 flags=revlog.REVIDX_DEFAULT_FLAGS):
61 flags=revlog.REVIDX_DEFAULT_FLAGS):
61 if b'[NOOP]' in text:
62 if b'[NOOP]' in text:
62 flags |= REVIDX_NOOP
63 flags |= REVIDX_NOOP
63 return orig(self, text, transaction, link, p1, p2, cachedelta=cachedelta,
64 node=node, flags=flags)
65
64
66 def b64addrevision(orig, self, text, transaction, link, p1, p2,
67 cachedelta=None, node=None,
68 flags=revlog.REVIDX_DEFAULT_FLAGS):
69 if b'[BASE64]' in text:
65 if b'[BASE64]' in text:
70 flags |= REVIDX_BASE64
66 flags |= REVIDX_BASE64
71 return orig(self, text, transaction, link, p1, p2, cachedelta=cachedelta,
72 node=node, flags=flags)
73
67
74 def gzipaddrevision(orig, self, text, transaction, link, p1, p2,
75 cachedelta=None, node=None,
76 flags=revlog.REVIDX_DEFAULT_FLAGS):
77 if b'[GZIP]' in text:
68 if b'[GZIP]' in text:
78 flags |= REVIDX_GZIP
69 flags |= REVIDX_GZIP
79 return orig(self, text, transaction, link, p1, p2, cachedelta=cachedelta,
80 node=node, flags=flags)
81
70
82 def failaddrevision(orig, self, text, transaction, link, p1, p2,
83 cachedelta=None, node=None,
84 flags=revlog.REVIDX_DEFAULT_FLAGS):
85 # This addrevision wrapper is meant to add a flag we will not have
71 # This addrevision wrapper is meant to add a flag we will not have
86 # transforms registered for, ensuring we handle this error case.
72 # transforms registered for, ensuring we handle this error case.
87 if b'[FAIL]' in text:
73 if b'[FAIL]' in text:
88 flags |= REVIDX_FAIL
74 flags |= REVIDX_FAIL
89 return orig(self, text, transaction, link, p1, p2, cachedelta=cachedelta,
75
90 node=node, flags=flags)
76 return super(wrappedfile, self).addrevision(text, transaction, link,
77 p1, p2,
78 cachedelta=cachedelta,
79 node=node,
80 flags=flags)
81
82 obj.__class__ = wrappedfile
83
84 def reposetup(ui, repo):
85 class wrappingflagprocessorrepo(repo.__class__):
86 def file(self, f):
87 orig = super(wrappingflagprocessorrepo, self).file(f)
88 makewrappedfile(orig)
89 return orig
90
91 repo.__class__ = wrappingflagprocessorrepo
91
92
92 def extsetup(ui):
93 def extsetup(ui):
93 # Enable changegroup3 for flags to be sent over the wire
94 # Enable changegroup3 for flags to be sent over the wire
94 wrapfunction = extensions.wrapfunction
95 wrapfunction = extensions.wrapfunction
95 wrapfunction(changegroup,
96 wrapfunction(changegroup,
96 'supportedoutgoingversions',
97 'supportedoutgoingversions',
97 supportedoutgoingversions)
98 supportedoutgoingversions)
98 wrapfunction(changegroup,
99 wrapfunction(changegroup,
99 'allsupportedversions',
100 'allsupportedversions',
100 allsupportedversions)
101 allsupportedversions)
101
102
102 # Teach revlog about our test flags
103 # Teach revlog about our test flags
103 flags = [REVIDX_NOOP, REVIDX_BASE64, REVIDX_GZIP, REVIDX_FAIL]
104 flags = [REVIDX_NOOP, REVIDX_BASE64, REVIDX_GZIP, REVIDX_FAIL]
104 revlog.REVIDX_KNOWN_FLAGS |= util.bitsfrom(flags)
105 revlog.REVIDX_KNOWN_FLAGS |= util.bitsfrom(flags)
105 revlog.REVIDX_FLAGS_ORDER.extend(flags)
106 revlog.REVIDX_FLAGS_ORDER.extend(flags)
106
107
107 # Teach exchange to use changegroup 3
108 # Teach exchange to use changegroup 3
108 for k in exchange._bundlespeccontentopts.keys():
109 for k in exchange._bundlespeccontentopts.keys():
109 exchange._bundlespeccontentopts[k]["cg.version"] = "03"
110 exchange._bundlespeccontentopts[k]["cg.version"] = "03"
110
111
111 # Add wrappers for addrevision, responsible to set flags depending on the
112 # revision data contents.
113 wrapfunction(filelog.filelog, 'addrevision', noopaddrevision)
114 wrapfunction(filelog.filelog, 'addrevision', b64addrevision)
115 wrapfunction(filelog.filelog, 'addrevision', gzipaddrevision)
116 wrapfunction(filelog.filelog, 'addrevision', failaddrevision)
117
118 # Register flag processors for each extension
112 # Register flag processors for each extension
119 revlog.addflagprocessor(
113 revlog.addflagprocessor(
120 REVIDX_NOOP,
114 REVIDX_NOOP,
121 (
115 (
122 noopdonothing,
116 noopdonothing,
123 noopdonothing,
117 noopdonothing,
124 validatehash,
118 validatehash,
125 )
119 )
126 )
120 )
127 revlog.addflagprocessor(
121 revlog.addflagprocessor(
128 REVIDX_BASE64,
122 REVIDX_BASE64,
129 (
123 (
130 b64decode,
124 b64decode,
131 b64encode,
125 b64encode,
132 bypass,
126 bypass,
133 ),
127 ),
134 )
128 )
135 revlog.addflagprocessor(
129 revlog.addflagprocessor(
136 REVIDX_GZIP,
130 REVIDX_GZIP,
137 (
131 (
138 gzipdecompress,
132 gzipdecompress,
139 gzipcompress,
133 gzipcompress,
140 bypass
134 bypass
141 )
135 )
142 )
136 )
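Note on the hunk above: for each test flag the extension registers a (read, write, raw) transform tuple via revlog.addflagprocessor, and the processing loop applies transforms in REVIDX_FLAGS_ORDER on read and in reverse order on write, so the two directions invert each other. The following is a minimal standalone sketch of that composition for the base64 and gzip flags, using only the standard library; the flag values are copied from the extension, while processflags here is a simplified stand-in for Mercurial's dispatch, not its real API.

import base64
import zlib

REVIDX_BASE64 = 1 << 2
REVIDX_GZIP = 1 << 1

# flag -> (readtransform, writetransform), mirroring the tuples registered above
PROCESSORS = {
    REVIDX_BASE64: (base64.b64decode, base64.b64encode),
    REVIDX_GZIP: (zlib.decompress, zlib.compress),
}
FLAGS_ORDER = [REVIDX_BASE64, REVIDX_GZIP]

def processflags(text, flags, operation):
    # Writes walk the flags in reverse order so that a later read, walking
    # them forward, undoes the transforms one by one.
    order = reversed(FLAGS_ORDER) if operation == 'write' else FLAGS_ORDER
    for flag in order:
        if flag & flags:
            read, write = PROCESSORS[flag]
            text = read(text) if operation == 'read' else write(text)
    return text

both = REVIDX_BASE64 | REVIDX_GZIP
stored = processflags(b'[BASE64][GZIP]\n', both, 'write')  # compress, then b64-encode
assert processflags(stored, both, 'read') == b'[BASE64][GZIP]\n'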
tests/simplestorerepo.py
@@ -1,675 +1,685 @@
1 # simplestorerepo.py - Extension that swaps in alternate repository storage.
1 # simplestorerepo.py - Extension that swaps in alternate repository storage.
2 #
2 #
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 # To use this with the test suite:
8 # To use this with the test suite:
9 #
9 #
10 # $ HGREPOFEATURES="simplestore" ./run-tests.py \
10 # $ HGREPOFEATURES="simplestore" ./run-tests.py \
11 # --extra-config-opt extensions.simplestore=`pwd`/simplestorerepo.py
11 # --extra-config-opt extensions.simplestore=`pwd`/simplestorerepo.py
12
12
13 from __future__ import absolute_import
13 from __future__ import absolute_import
14
14
15 import stat
15 import stat
16
16
17 from mercurial.i18n import _
17 from mercurial.i18n import _
18 from mercurial.node import (
18 from mercurial.node import (
19 bin,
19 bin,
20 hex,
20 hex,
21 nullid,
21 nullid,
22 nullrev,
22 nullrev,
23 )
23 )
24 from mercurial.thirdparty import (
24 from mercurial.thirdparty import (
25 cbor,
25 cbor,
26 )
26 )
27 from mercurial import (
27 from mercurial import (
28 ancestor,
28 ancestor,
29 bundlerepo,
29 bundlerepo,
30 error,
30 error,
31 extensions,
31 extensions,
32 filelog,
32 filelog,
33 localrepo,
33 localrepo,
34 mdiff,
34 mdiff,
35 pycompat,
35 pycompat,
36 revlog,
36 revlog,
37 store,
37 store,
38 verify,
38 verify,
39 )
39 )
40
40
41 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
41 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
42 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
42 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
43 # be specifying the version(s) of Mercurial they are tested with, or
43 # be specifying the version(s) of Mercurial they are tested with, or
44 # leave the attribute unspecified.
44 # leave the attribute unspecified.
45 testedwith = 'ships-with-hg-core'
45 testedwith = 'ships-with-hg-core'
46
46
47 REQUIREMENT = 'testonly-simplestore'
47 REQUIREMENT = 'testonly-simplestore'
48
48
49 def validatenode(node):
49 def validatenode(node):
50 if isinstance(node, int):
50 if isinstance(node, int):
51 raise ValueError('expected node; got int')
51 raise ValueError('expected node; got int')
52
52
53 if len(node) != 20:
53 if len(node) != 20:
54 raise ValueError('expected 20 byte node')
54 raise ValueError('expected 20 byte node')
55
55
56 def validaterev(rev):
56 def validaterev(rev):
57 if not isinstance(rev, int):
57 if not isinstance(rev, int):
58 raise ValueError('expected int')
58 raise ValueError('expected int')
59
59
60 class filestorage(object):
60 class filestorage(object):
61 """Implements storage for a tracked path.
61 """Implements storage for a tracked path.
62
62
63 Data is stored in the VFS in a directory corresponding to the tracked
63 Data is stored in the VFS in a directory corresponding to the tracked
64 path.
64 path.
65
65
66 Index data is stored in an ``index`` file using CBOR.
66 Index data is stored in an ``index`` file using CBOR.
67
67
68 Fulltext data is stored in files having names of the node.
68 Fulltext data is stored in files having names of the node.
69 """
69 """
70
70
71 def __init__(self, svfs, path):
71 def __init__(self, svfs, path):
72 self._svfs = svfs
72 self._svfs = svfs
73 self._path = path
73 self._path = path
74
74
75 self._storepath = b'/'.join([b'data', path])
75 self._storepath = b'/'.join([b'data', path])
76 self._indexpath = b'/'.join([self._storepath, b'index'])
76 self._indexpath = b'/'.join([self._storepath, b'index'])
77
77
78 indexdata = self._svfs.tryread(self._indexpath)
78 indexdata = self._svfs.tryread(self._indexpath)
79 if indexdata:
79 if indexdata:
80 indexdata = cbor.loads(indexdata)
80 indexdata = cbor.loads(indexdata)
81
81
82 self._indexdata = indexdata or []
82 self._indexdata = indexdata or []
83 self._indexbynode = {}
83 self._indexbynode = {}
84 self._indexbyrev = {}
84 self._indexbyrev = {}
85 self.index = []
85 self.index = []
86 self._refreshindex()
86 self._refreshindex()
87
87
88 # This is used by changegroup code :/
88 # This is used by changegroup code :/
89 self._generaldelta = True
89 self._generaldelta = True
90 self.storedeltachains = False
90 self.storedeltachains = False
91
91
92 self.version = 1
92 self.version = 1
93
93
94 def _refreshindex(self):
94 def _refreshindex(self):
95 self._indexbynode.clear()
95 self._indexbynode.clear()
96 self._indexbyrev.clear()
96 self._indexbyrev.clear()
97 self.index = []
97 self.index = []
98
98
99 for i, entry in enumerate(self._indexdata):
99 for i, entry in enumerate(self._indexdata):
100 self._indexbynode[entry[b'node']] = entry
100 self._indexbynode[entry[b'node']] = entry
101 self._indexbyrev[i] = entry
101 self._indexbyrev[i] = entry
102
102
103 self._indexbynode[nullid] = {
103 self._indexbynode[nullid] = {
104 b'node': nullid,
104 b'node': nullid,
105 b'p1': nullid,
105 b'p1': nullid,
106 b'p2': nullid,
106 b'p2': nullid,
107 b'linkrev': nullrev,
107 b'linkrev': nullrev,
108 b'flags': 0,
108 b'flags': 0,
109 }
109 }
110
110
111 self._indexbyrev[nullrev] = {
111 self._indexbyrev[nullrev] = {
112 b'node': nullid,
112 b'node': nullid,
113 b'p1': nullid,
113 b'p1': nullid,
114 b'p2': nullid,
114 b'p2': nullid,
115 b'linkrev': nullrev,
115 b'linkrev': nullrev,
116 b'flags': 0,
116 b'flags': 0,
117 }
117 }
118
118
119 for i, entry in enumerate(self._indexdata):
119 for i, entry in enumerate(self._indexdata):
120 p1rev, p2rev = self.parentrevs(self.rev(entry[b'node']))
120 p1rev, p2rev = self.parentrevs(self.rev(entry[b'node']))
121
121
122 # start, length, rawsize, chainbase, linkrev, p1, p2, node
122 # start, length, rawsize, chainbase, linkrev, p1, p2, node
123 self.index.append((0, 0, 0, -1, entry[b'linkrev'], p1rev, p2rev,
123 self.index.append((0, 0, 0, -1, entry[b'linkrev'], p1rev, p2rev,
124 entry[b'node']))
124 entry[b'node']))
125
125
126 self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))
126 self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))
127
127
128 def __len__(self):
128 def __len__(self):
129 return len(self._indexdata)
129 return len(self._indexdata)
130
130
131 def __iter__(self):
131 def __iter__(self):
132 return iter(range(len(self)))
132 return iter(range(len(self)))
133
133
134 def revs(self, start=0, stop=None):
134 def revs(self, start=0, stop=None):
135 step = 1
135 step = 1
136 if stop is not None:
136 if stop is not None:
137 if start > stop:
137 if start > stop:
138 step = -1
138 step = -1
139
139
140 stop += step
140 stop += step
141 else:
141 else:
142 stop = len(self)
142 stop = len(self)
143
143
144 return range(start, stop, step)
144 return range(start, stop, step)
145
145
146 def parents(self, node):
146 def parents(self, node):
147 validatenode(node)
147 validatenode(node)
148
148
149 if node not in self._indexbynode:
149 if node not in self._indexbynode:
150 raise KeyError('unknown node')
150 raise KeyError('unknown node')
151
151
152 entry = self._indexbynode[node]
152 entry = self._indexbynode[node]
153
153
154 return entry[b'p1'], entry[b'p2']
154 return entry[b'p1'], entry[b'p2']
155
155
156 def parentrevs(self, rev):
156 def parentrevs(self, rev):
157 p1, p2 = self.parents(self._indexbyrev[rev][b'node'])
157 p1, p2 = self.parents(self._indexbyrev[rev][b'node'])
158 return self.rev(p1), self.rev(p2)
158 return self.rev(p1), self.rev(p2)
159
159
160 def rev(self, node):
160 def rev(self, node):
161 validatenode(node)
161 validatenode(node)
162
162
163 try:
163 try:
164 self._indexbynode[node]
164 self._indexbynode[node]
165 except KeyError:
165 except KeyError:
166 raise error.LookupError(node, self._indexpath, _('no node'))
166 raise error.LookupError(node, self._indexpath, _('no node'))
167
167
168 for rev, entry in self._indexbyrev.items():
168 for rev, entry in self._indexbyrev.items():
169 if entry[b'node'] == node:
169 if entry[b'node'] == node:
170 return rev
170 return rev
171
171
172 raise error.ProgrammingError('this should not occur')
172 raise error.ProgrammingError('this should not occur')
173
173
174 def node(self, rev):
174 def node(self, rev):
175 validaterev(rev)
175 validaterev(rev)
176
176
177 return self._indexbyrev[rev][b'node']
177 return self._indexbyrev[rev][b'node']
178
178
179 def lookup(self, node):
179 def lookup(self, node):
180 if isinstance(node, int):
180 if isinstance(node, int):
181 return self.node(node)
181 return self.node(node)
182
182
183 if len(node) == 20:
183 if len(node) == 20:
184 self.rev(node)
184 self.rev(node)
185 return node
185 return node
186
186
187 try:
187 try:
188 rev = int(node)
188 rev = int(node)
189 if '%d' % rev != node:
189 if '%d' % rev != node:
190 raise ValueError
190 raise ValueError
191
191
192 if rev < 0:
192 if rev < 0:
193 rev = len(self) + rev
193 rev = len(self) + rev
194 if rev < 0 or rev >= len(self):
194 if rev < 0 or rev >= len(self):
195 raise ValueError
195 raise ValueError
196
196
197 return self.node(rev)
197 return self.node(rev)
198 except (ValueError, OverflowError):
198 except (ValueError, OverflowError):
199 pass
199 pass
200
200
201 if len(node) == 40:
201 if len(node) == 40:
202 try:
202 try:
203 rawnode = bin(node)
203 rawnode = bin(node)
204 self.rev(rawnode)
204 self.rev(rawnode)
205 return rawnode
205 return rawnode
206 except TypeError:
206 except TypeError:
207 pass
207 pass
208
208
209 raise error.LookupError(node, self._path, _('invalid lookup input'))
209 raise error.LookupError(node, self._path, _('invalid lookup input'))
210
210
211 def linkrev(self, rev):
211 def linkrev(self, rev):
212 validaterev(rev)
212 validaterev(rev)
213
213
214 return self._indexbyrev[rev][b'linkrev']
214 return self._indexbyrev[rev][b'linkrev']
215
215
216 def flags(self, rev):
216 def flags(self, rev):
217 validaterev(rev)
217 validaterev(rev)
218
218
219 return self._indexbyrev[rev][b'flags']
219 return self._indexbyrev[rev][b'flags']
220
220
221 def deltaparent(self, rev):
221 def deltaparent(self, rev):
222 validaterev(rev)
222 validaterev(rev)
223
223
224 p1node = self.parents(self.node(rev))[0]
224 p1node = self.parents(self.node(rev))[0]
225 return self.rev(p1node)
225 return self.rev(p1node)
226
226
227 def candelta(self, baserev, rev):
227 def candelta(self, baserev, rev):
228 validaterev(baserev)
228 validaterev(baserev)
229 validaterev(rev)
229 validaterev(rev)
230
230
231 if ((self.flags(baserev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)
231 if ((self.flags(baserev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)
232 or (self.flags(rev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)):
232 or (self.flags(rev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)):
233 return False
233 return False
234
234
235 return True
235 return True
236
236
237 def rawsize(self, rev):
237 def rawsize(self, rev):
238 validaterev(rev)
238 validaterev(rev)
239 node = self.node(rev)
239 node = self.node(rev)
240 return len(self.revision(node, raw=True))
240 return len(self.revision(node, raw=True))
241
241
242 def _processflags(self, text, flags, operation, raw=False):
242 def _processflags(self, text, flags, operation, raw=False):
243 if flags == 0:
243 if flags == 0:
244 return text, True
244 return text, True
245
245
246 if flags & ~revlog.REVIDX_KNOWN_FLAGS:
247 raise error.RevlogError(_("incompatible revision flag '%#x'") %
248 (flags & ~revlog.REVIDX_KNOWN_FLAGS))
249
246 validatehash = True
250 validatehash = True
247 # Depending on the operation (read or write), the order might be
251 # Depending on the operation (read or write), the order might be
248 # reversed due to non-commutative transforms.
252 # reversed due to non-commutative transforms.
249 orderedflags = revlog.REVIDX_FLAGS_ORDER
253 orderedflags = revlog.REVIDX_FLAGS_ORDER
250 if operation == 'write':
254 if operation == 'write':
251 orderedflags = reversed(orderedflags)
255 orderedflags = reversed(orderedflags)
252
256
253 for flag in orderedflags:
257 for flag in orderedflags:
254 # If a flagprocessor has been registered for a known flag, apply the
258 # If a flagprocessor has been registered for a known flag, apply the
255 # related operation transform and update result tuple.
259 # related operation transform and update result tuple.
256 if flag & flags:
260 if flag & flags:
257 vhash = True
261 vhash = True
258
262
259 if flag not in revlog._flagprocessors:
263 if flag not in revlog._flagprocessors:
260 message = _("missing processor for flag '%#x'") % (flag)
264 message = _("missing processor for flag '%#x'") % (flag)
261 raise revlog.RevlogError(message)
265 raise revlog.RevlogError(message)
262
266
263 processor = revlog._flagprocessors[flag]
267 processor = revlog._flagprocessors[flag]
264 if processor is not None:
268 if processor is not None:
265 readtransform, writetransform, rawtransform = processor
269 readtransform, writetransform, rawtransform = processor
266
270
267 if raw:
271 if raw:
268 vhash = rawtransform(self, text)
272 vhash = rawtransform(self, text)
269 elif operation == 'read':
273 elif operation == 'read':
270 text, vhash = readtransform(self, text)
274 text, vhash = readtransform(self, text)
271 else: # write operation
275 else: # write operation
272 text, vhash = writetransform(self, text)
276 text, vhash = writetransform(self, text)
273 validatehash = validatehash and vhash
277 validatehash = validatehash and vhash
274
278
275 return text, validatehash
279 return text, validatehash
276
280
277 def checkhash(self, text, node, p1=None, p2=None, rev=None):
281 def checkhash(self, text, node, p1=None, p2=None, rev=None):
278 if p1 is None and p2 is None:
282 if p1 is None and p2 is None:
279 p1, p2 = self.parents(node)
283 p1, p2 = self.parents(node)
280 if node != revlog.hash(text, p1, p2):
284 if node != revlog.hash(text, p1, p2):
281 raise error.RevlogError(_("integrity check failed on %s") %
285 raise error.RevlogError(_("integrity check failed on %s") %
282 self._path)
286 self._path)
283
287
284 def revision(self, node, raw=False):
288 def revision(self, node, raw=False):
285 validatenode(node)
289 validatenode(node)
286
290
287 if node == nullid:
291 if node == nullid:
288 return b''
292 return b''
289
293
290 rev = self.rev(node)
294 rev = self.rev(node)
291 flags = self.flags(rev)
295 flags = self.flags(rev)
292
296
293 path = b'/'.join([self._storepath, hex(node)])
297 path = b'/'.join([self._storepath, hex(node)])
294 rawtext = self._svfs.read(path)
298 rawtext = self._svfs.read(path)
295
299
296 text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
300 text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
297 if validatehash:
301 if validatehash:
298 self.checkhash(text, node, rev=rev)
302 self.checkhash(text, node, rev=rev)
299
303
300 return text
304 return text
301
305
302 def read(self, node):
306 def read(self, node):
303 validatenode(node)
307 validatenode(node)
304
308
305 revision = self.revision(node)
309 revision = self.revision(node)
306
310
307 if not revision.startswith(b'\1\n'):
311 if not revision.startswith(b'\1\n'):
308 return revision
312 return revision
309
313
310 start = revision.index(b'\1\n', 2)
314 start = revision.index(b'\1\n', 2)
311 return revision[start + 2:]
315 return revision[start + 2:]
312
316
313 def renamed(self, node):
317 def renamed(self, node):
314 validatenode(node)
318 validatenode(node)
315
319
316 if self.parents(node)[0] != nullid:
320 if self.parents(node)[0] != nullid:
317 return False
321 return False
318
322
319 fulltext = self.revision(node)
323 fulltext = self.revision(node)
320 m = filelog.parsemeta(fulltext)[0]
324 m = filelog.parsemeta(fulltext)[0]
321
325
322 if m and 'copy' in m:
326 if m and 'copy' in m:
323 return m['copy'], bin(m['copyrev'])
327 return m['copy'], bin(m['copyrev'])
324
328
325 return False
329 return False
326
330
327 def cmp(self, node, text):
331 def cmp(self, node, text):
328 validatenode(node)
332 validatenode(node)
329
333
330 t = text
334 t = text
331
335
332 if text.startswith(b'\1\n'):
336 if text.startswith(b'\1\n'):
333 t = b'\1\n\1\n' + text
337 t = b'\1\n\1\n' + text
334
338
335 p1, p2 = self.parents(node)
339 p1, p2 = self.parents(node)
336
340
337 if revlog.hash(t, p1, p2) == node:
341 if revlog.hash(t, p1, p2) == node:
338 return False
342 return False
339
343
340 if self.iscensored(self.rev(node)):
344 if self.iscensored(self.rev(node)):
341 return text != b''
345 return text != b''
342
346
343 if self.renamed(node):
347 if self.renamed(node):
344 t2 = self.read(node)
348 t2 = self.read(node)
345 return t2 != text
349 return t2 != text
346
350
347 return True
351 return True
348
352
349 def size(self, rev):
353 def size(self, rev):
350 validaterev(rev)
354 validaterev(rev)
351
355
352 node = self._indexbyrev[rev][b'node']
356 node = self._indexbyrev[rev][b'node']
353
357
354 if self.renamed(node):
358 if self.renamed(node):
355 return len(self.read(node))
359 return len(self.read(node))
356
360
357 if self.iscensored(rev):
361 if self.iscensored(rev):
358 return 0
362 return 0
359
363
360 return len(self.revision(node))
364 return len(self.revision(node))
361
365
362 def iscensored(self, rev):
366 def iscensored(self, rev):
363 validaterev(rev)
367 validaterev(rev)
364
368
365 return self.flags(rev) & revlog.REVIDX_ISCENSORED
369 return self.flags(rev) & revlog.REVIDX_ISCENSORED
366
370
367 def commonancestorsheads(self, a, b):
371 def commonancestorsheads(self, a, b):
368 validatenode(a)
372 validatenode(a)
369 validatenode(b)
373 validatenode(b)
370
374
371 a = self.rev(a)
375 a = self.rev(a)
372 b = self.rev(b)
376 b = self.rev(b)
373
377
374 ancestors = ancestor.commonancestorsheads(self.parentrevs, a, b)
378 ancestors = ancestor.commonancestorsheads(self.parentrevs, a, b)
375 return pycompat.maplist(self.node, ancestors)
379 return pycompat.maplist(self.node, ancestors)
376
380
377 def descendants(self, revs):
381 def descendants(self, revs):
378 # This is a copy of revlog.descendants()
382 # This is a copy of revlog.descendants()
379 first = min(revs)
383 first = min(revs)
380 if first == nullrev:
384 if first == nullrev:
381 for i in self:
385 for i in self:
382 yield i
386 yield i
383 return
387 return
384
388
385 seen = set(revs)
389 seen = set(revs)
386 for i in self.revs(start=first + 1):
390 for i in self.revs(start=first + 1):
387 for x in self.parentrevs(i):
391 for x in self.parentrevs(i):
388 if x != nullrev and x in seen:
392 if x != nullrev and x in seen:
389 seen.add(i)
393 seen.add(i)
390 yield i
394 yield i
391 break
395 break
392
396
393 # Required by verify.
397 # Required by verify.
394 def files(self):
398 def files(self):
395 entries = self._svfs.listdir(self._storepath)
399 entries = self._svfs.listdir(self._storepath)
396
400
397 # Strip out undo.backup.* files created as part of transaction
401 # Strip out undo.backup.* files created as part of transaction
398 # recording.
402 # recording.
399 entries = [f for f in entries if not f.startswith('undo.backup.')]
403 entries = [f for f in entries if not f.startswith('undo.backup.')]
400
404
401 return [b'/'.join((self._storepath, f)) for f in entries]
405 return [b'/'.join((self._storepath, f)) for f in entries]
402
406
403 # Required by verify.
407 # Required by verify.
404 def checksize(self):
408 def checksize(self):
405 return 0, 0
409 return 0, 0
406
410
407 def add(self, text, meta, transaction, linkrev, p1, p2):
411 def add(self, text, meta, transaction, linkrev, p1, p2):
408 transaction.addbackup(self._indexpath)
409
410 if meta or text.startswith(b'\1\n'):
412 if meta or text.startswith(b'\1\n'):
411 text = filelog.packmeta(meta, text)
413 text = filelog.packmeta(meta, text)
412
414
413 return self.addrevision(text, transaction, linkrev, p1, p2)
415 return self.addrevision(text, transaction, linkrev, p1, p2)
414
416
415 def addrevision(self, text, transaction, linkrev, p1, p2, node=None,
417 def addrevision(self, text, transaction, linkrev, p1, p2, node=None,
416 flags=0):
418 flags=revlog.REVIDX_DEFAULT_FLAGS, cachedelta=None):
417 validatenode(p1)
419 validatenode(p1)
418 validatenode(p2)
420 validatenode(p2)
419
421
420 if flags:
422 if flags:
421 node = node or revlog.hash(text, p1, p2)
423 node = node or revlog.hash(text, p1, p2)
422
424
423 rawtext, validatehash = self._processflags(text, flags, 'write')
425 rawtext, validatehash = self._processflags(text, flags, 'write')
424
426
425 node = node or revlog.hash(text, p1, p2)
427 node = node or revlog.hash(text, p1, p2)
426
428
427 if node in self._indexbynode:
429 if node in self._indexbynode:
428 return node
430 return node
429
431
430 if validatehash:
432 if validatehash:
431 self.checkhash(rawtext, node, p1=p1, p2=p2)
433 self.checkhash(rawtext, node, p1=p1, p2=p2)
432
434
435 return self._addrawrevision(node, rawtext, transaction, linkrev, p1, p2,
436 flags)
437
438 def _addrawrevision(self, node, rawtext, transaction, link, p1, p2, flags):
439 transaction.addbackup(self._indexpath)
440
433 path = b'/'.join([self._storepath, hex(node)])
441 path = b'/'.join([self._storepath, hex(node)])
434
442
435 self._svfs.write(path, text)
443 self._svfs.write(path, rawtext)
436
444
437 self._indexdata.append({
445 self._indexdata.append({
438 b'node': node,
446 b'node': node,
439 b'p1': p1,
447 b'p1': p1,
440 b'p2': p2,
448 b'p2': p2,
441 b'linkrev': linkrev,
449 b'linkrev': link,
442 b'flags': flags,
450 b'flags': flags,
443 })
451 })
444
452
445 self._reflectindexupdate()
453 self._reflectindexupdate()
446
454
447 return node
455 return node
448
456
449 def _reflectindexupdate(self):
457 def _reflectindexupdate(self):
450 self._refreshindex()
458 self._refreshindex()
451 self._svfs.write(self._indexpath, cbor.dumps(self._indexdata))
459 self._svfs.write(self._indexpath, cbor.dumps(self._indexdata))
452
460
453 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
461 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
454 nodes = []
462 nodes = []
455
463
456 transaction.addbackup(self._indexpath)
464 transaction.addbackup(self._indexpath)
457
465
458 for node, p1, p2, linknode, deltabase, delta, flags in deltas:
466 for node, p1, p2, linknode, deltabase, delta, flags in deltas:
459 linkrev = linkmapper(linknode)
467 linkrev = linkmapper(linknode)
468 flags = flags or revlog.REVIDX_DEFAULT_FLAGS
460
469
461 nodes.append(node)
470 nodes.append(node)
462
471
463 if node in self._indexbynode:
472 if node in self._indexbynode:
464 continue
473 continue
465
474
466 # Need to resolve the fulltext from the delta base.
475 # Need to resolve the fulltext from the delta base.
467 if deltabase == nullid:
476 if deltabase == nullid:
468 text = mdiff.patch(b'', delta)
477 text = mdiff.patch(b'', delta)
469 else:
478 else:
470 text = mdiff.patch(self.revision(deltabase), delta)
479 text = mdiff.patch(self.revision(deltabase), delta)
471
480
472 self.addrevision(text, transaction, linkrev, p1, p2, flags)
481 self._addrawrevision(node, text, transaction, linkrev, p1, p2,
482 flags)
473
483
474 if addrevisioncb:
484 if addrevisioncb:
475 addrevisioncb(self, node)
485 addrevisioncb(self, node)
476
486
477 return nodes
487 return nodes
478
488
479 def revdiff(self, rev1, rev2):
489 def revdiff(self, rev1, rev2):
480 validaterev(rev1)
490 validaterev(rev1)
481 validaterev(rev2)
491 validaterev(rev2)
482
492
483 node1 = self.node(rev1)
493 node1 = self.node(rev1)
484 node2 = self.node(rev2)
494 node2 = self.node(rev2)
485
495
486 return mdiff.textdiff(self.revision(node1, raw=True),
496 return mdiff.textdiff(self.revision(node1, raw=True),
487 self.revision(node2, raw=True))
497 self.revision(node2, raw=True))
488
498
489 def headrevs(self):
499 def headrevs(self):
490 # Assume all revisions are heads by default.
500 # Assume all revisions are heads by default.
491 revishead = {rev: True for rev in self._indexbyrev}
501 revishead = {rev: True for rev in self._indexbyrev}
492
502
493 for rev, entry in self._indexbyrev.items():
503 for rev, entry in self._indexbyrev.items():
494 # Unset head flag for all seen parents.
504 # Unset head flag for all seen parents.
495 revishead[self.rev(entry[b'p1'])] = False
505 revishead[self.rev(entry[b'p1'])] = False
496 revishead[self.rev(entry[b'p2'])] = False
506 revishead[self.rev(entry[b'p2'])] = False
497
507
498 return [rev for rev, ishead in sorted(revishead.items())
508 return [rev for rev, ishead in sorted(revishead.items())
499 if ishead]
509 if ishead]
500
510
501 def heads(self, start=None, stop=None):
511 def heads(self, start=None, stop=None):
502 # This is copied from revlog.py.
512 # This is copied from revlog.py.
503 if start is None and stop is None:
513 if start is None and stop is None:
504 if not len(self):
514 if not len(self):
505 return [nullid]
515 return [nullid]
506 return [self.node(r) for r in self.headrevs()]
516 return [self.node(r) for r in self.headrevs()]
507
517
508 if start is None:
518 if start is None:
509 start = nullid
519 start = nullid
510 if stop is None:
520 if stop is None:
511 stop = []
521 stop = []
512 stoprevs = set([self.rev(n) for n in stop])
522 stoprevs = set([self.rev(n) for n in stop])
513 startrev = self.rev(start)
523 startrev = self.rev(start)
514 reachable = {startrev}
524 reachable = {startrev}
515 heads = {startrev}
525 heads = {startrev}
516
526
517 parentrevs = self.parentrevs
527 parentrevs = self.parentrevs
518 for r in self.revs(start=startrev + 1):
528 for r in self.revs(start=startrev + 1):
519 for p in parentrevs(r):
529 for p in parentrevs(r):
520 if p in reachable:
530 if p in reachable:
521 if r not in stoprevs:
531 if r not in stoprevs:
522 reachable.add(r)
532 reachable.add(r)
523 heads.add(r)
533 heads.add(r)
524 if p in heads and p not in stoprevs:
534 if p in heads and p not in stoprevs:
525 heads.remove(p)
535 heads.remove(p)
526
536
527 return [self.node(r) for r in heads]
537 return [self.node(r) for r in heads]
528
538
529 def children(self, node):
539 def children(self, node):
530 validatenode(node)
540 validatenode(node)
531
541
532 # This is a copy of revlog.children().
542 # This is a copy of revlog.children().
533 c = []
543 c = []
534 p = self.rev(node)
544 p = self.rev(node)
535 for r in self.revs(start=p + 1):
545 for r in self.revs(start=p + 1):
536 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
546 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
537 if prevs:
547 if prevs:
538 for pr in prevs:
548 for pr in prevs:
539 if pr == p:
549 if pr == p:
540 c.append(self.node(r))
550 c.append(self.node(r))
541 elif p == nullrev:
551 elif p == nullrev:
542 c.append(self.node(r))
552 c.append(self.node(r))
543 return c
553 return c
544
554
545 def getstrippoint(self, minlink):
555 def getstrippoint(self, minlink):
546
556
547 # This is largely a copy of revlog.getstrippoint().
557 # This is largely a copy of revlog.getstrippoint().
548 brokenrevs = set()
558 brokenrevs = set()
549 strippoint = len(self)
559 strippoint = len(self)
550
560
551 heads = {}
561 heads = {}
552 futurelargelinkrevs = set()
562 futurelargelinkrevs = set()
553 for head in self.headrevs():
563 for head in self.headrevs():
554 headlinkrev = self.linkrev(head)
564 headlinkrev = self.linkrev(head)
555 heads[head] = headlinkrev
565 heads[head] = headlinkrev
556 if headlinkrev >= minlink:
566 if headlinkrev >= minlink:
557 futurelargelinkrevs.add(headlinkrev)
567 futurelargelinkrevs.add(headlinkrev)
558
568
559 # This algorithm involves walking down the rev graph, starting at the
569 # This algorithm involves walking down the rev graph, starting at the
560 # heads. Since the revs are topologically sorted according to linkrev,
570 # heads. Since the revs are topologically sorted according to linkrev,
561 # once all head linkrevs are below the minlink, we know there are
571 # once all head linkrevs are below the minlink, we know there are
562 # no more revs that could have a linkrev greater than minlink.
572 # no more revs that could have a linkrev greater than minlink.
563 # So we can stop walking.
573 # So we can stop walking.
564 while futurelargelinkrevs:
574 while futurelargelinkrevs:
565 strippoint -= 1
575 strippoint -= 1
566 linkrev = heads.pop(strippoint)
576 linkrev = heads.pop(strippoint)
567
577
568 if linkrev < minlink:
578 if linkrev < minlink:
569 brokenrevs.add(strippoint)
579 brokenrevs.add(strippoint)
570 else:
580 else:
571 futurelargelinkrevs.remove(linkrev)
581 futurelargelinkrevs.remove(linkrev)
572
582
573 for p in self.parentrevs(strippoint):
583 for p in self.parentrevs(strippoint):
574 if p != nullrev:
584 if p != nullrev:
575 plinkrev = self.linkrev(p)
585 plinkrev = self.linkrev(p)
576 heads[p] = plinkrev
586 heads[p] = plinkrev
577 if plinkrev >= minlink:
587 if plinkrev >= minlink:
578 futurelargelinkrevs.add(plinkrev)
588 futurelargelinkrevs.add(plinkrev)
579
589
580 return strippoint, brokenrevs
590 return strippoint, brokenrevs
581
591
582 def strip(self, minlink, transaction):
592 def strip(self, minlink, transaction):
583 if not len(self):
593 if not len(self):
584 return
594 return
585
595
586 rev, _ignored = self.getstrippoint(minlink)
596 rev, _ignored = self.getstrippoint(minlink)
587 if rev == len(self):
597 if rev == len(self):
588 return
598 return
589
599
590 # Purge index data starting at the requested revision.
600 # Purge index data starting at the requested revision.
591 self._indexdata[rev:] = []
601 self._indexdata[rev:] = []
592 self._reflectindexupdate()
602 self._reflectindexupdate()
593
603
594 def issimplestorefile(f, kind, st):
604 def issimplestorefile(f, kind, st):
595 if kind != stat.S_IFREG:
605 if kind != stat.S_IFREG:
596 return False
606 return False
597
607
598 if store.isrevlog(f, kind, st):
608 if store.isrevlog(f, kind, st):
599 return False
609 return False
600
610
601 # Ignore transaction undo files.
611 # Ignore transaction undo files.
602 if f.startswith('undo.'):
612 if f.startswith('undo.'):
603 return False
613 return False
604
614
605 # Otherwise assume it belongs to the simple store.
615 # Otherwise assume it belongs to the simple store.
606 return True
616 return True
607
617
608 class simplestore(store.encodedstore):
618 class simplestore(store.encodedstore):
609 def datafiles(self):
619 def datafiles(self):
610 for x in super(simplestore, self).datafiles():
620 for x in super(simplestore, self).datafiles():
611 yield x
621 yield x
612
622
613 # Supplement with non-revlog files.
623 # Supplement with non-revlog files.
614 extrafiles = self._walk('data', True, filefilter=issimplestorefile)
624 extrafiles = self._walk('data', True, filefilter=issimplestorefile)
615
625
616 for unencoded, encoded, size in extrafiles:
626 for unencoded, encoded, size in extrafiles:
617 try:
627 try:
618 unencoded = store.decodefilename(unencoded)
628 unencoded = store.decodefilename(unencoded)
619 except KeyError:
629 except KeyError:
620 unencoded = None
630 unencoded = None
621
631
622 yield unencoded, encoded, size
632 yield unencoded, encoded, size
623
633
624 def reposetup(ui, repo):
634 def reposetup(ui, repo):
625 if not repo.local():
635 if not repo.local():
626 return
636 return
627
637
628 if isinstance(repo, bundlerepo.bundlerepository):
638 if isinstance(repo, bundlerepo.bundlerepository):
629 raise error.Abort(_('cannot use simple store with bundlerepo'))
639 raise error.Abort(_('cannot use simple store with bundlerepo'))
630
640
631 class simplestorerepo(repo.__class__):
641 class simplestorerepo(repo.__class__):
632 def file(self, f):
642 def file(self, f):
633 return filestorage(self.svfs, f)
643 return filestorage(self.svfs, f)
634
644
635 repo.__class__ = simplestorerepo
645 repo.__class__ = simplestorerepo
636
646
637 def featuresetup(ui, supported):
647 def featuresetup(ui, supported):
638 supported.add(REQUIREMENT)
648 supported.add(REQUIREMENT)
639
649
640 def newreporequirements(orig, repo):
650 def newreporequirements(orig, repo):
641 """Modifies default requirements for new repos to use the simple store."""
651 """Modifies default requirements for new repos to use the simple store."""
642 requirements = orig(repo)
652 requirements = orig(repo)
643
653
644 # These requirements are only used to affect creation of the store
654 # These requirements are only used to affect creation of the store
645 # object. We have our own store. So we can remove them.
655 # object. We have our own store. So we can remove them.
646 # TODO do this once we feel like taking the test hit.
656 # TODO do this once we feel like taking the test hit.
647 #if 'fncache' in requirements:
657 #if 'fncache' in requirements:
648 # requirements.remove('fncache')
658 # requirements.remove('fncache')
649 #if 'dotencode' in requirements:
659 #if 'dotencode' in requirements:
650 # requirements.remove('dotencode')
660 # requirements.remove('dotencode')
651
661
652 requirements.add(REQUIREMENT)
662 requirements.add(REQUIREMENT)
653
663
654 return requirements
664 return requirements
655
665
656 def makestore(orig, requirements, path, vfstype):
666 def makestore(orig, requirements, path, vfstype):
657 if REQUIREMENT not in requirements:
667 if REQUIREMENT not in requirements:
658 return orig(requirements, path, vfstype)
668 return orig(requirements, path, vfstype)
659
669
660 return simplestore(path, vfstype)
670 return simplestore(path, vfstype)
661
671
662 def verifierinit(orig, self, *args, **kwargs):
672 def verifierinit(orig, self, *args, **kwargs):
663 orig(self, *args, **kwargs)
673 orig(self, *args, **kwargs)
664
674
665 # We don't care that files in the store don't align with what is
675 # We don't care that files in the store don't align with what is
666 # advertised. So suppress these warnings.
676 # advertised. So suppress these warnings.
667 self.warnorphanstorefiles = False
677 self.warnorphanstorefiles = False
668
678
669 def extsetup(ui):
679 def extsetup(ui):
670 localrepo.featuresetupfuncs.add(featuresetup)
680 localrepo.featuresetupfuncs.add(featuresetup)
671
681
672 extensions.wrapfunction(localrepo, 'newreporequirements',
682 extensions.wrapfunction(localrepo, 'newreporequirements',
673 newreporequirements)
683 newreporequirements)
674 extensions.wrapfunction(store, 'store', makestore)
684 extensions.wrapfunction(store, 'store', makestore)
675 extensions.wrapfunction(verify.verifier, '__init__', verifierinit)
685 extensions.wrapfunction(verify.verifier, '__init__', verifierinit)
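Note on the simplestorerepo.py hunk above: addrevision() now runs _processflags(text, flags, 'write') to turn the fulltext into rawtext and then hands off to a new _addrawrevision(), which writes the rawtext and index entry verbatim; addgroup() calls _addrawrevision() directly, so data arriving via changegroup is no longer pushed through the write transforms a second time. A minimal standalone sketch of that split follows; the names and the dict-backed store are illustrative only, not the extension's classes.

import base64

REVIDX_BASE64 = 1 << 2
store = {}  # node -> rawtext as stored

def addrevision(node, fulltext, flags):
    # Fulltext path: run the write transforms, then store the resulting rawtext.
    rawtext = base64.b64encode(fulltext) if flags & REVIDX_BASE64 else fulltext
    store[node] = rawtext
    return rawtext

def addrawrevision(node, rawtext, flags):
    # Raw path: the caller already holds rawtext; store it verbatim.
    store[node] = rawtext

raw = addrevision(b'n1', b'[BASE64]\n', REVIDX_BASE64)
addrawrevision(b'n2', raw, REVIDX_BASE64)  # no second round of encoding
assert store[b'n1'] == store[b'n2'] == base64.b64encode(b'[BASE64]\n')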
@@ -1,260 +1,300 @@
1 # Create server
1 # Create server
2 $ hg init server
2 $ hg init server
3 $ cd server
3 $ cd server
4 $ cat >> .hg/hgrc << EOF
4 $ cat >> .hg/hgrc << EOF
5 > [extensions]
5 > [extensions]
6 > extension=$TESTDIR/flagprocessorext.py
6 > extension=$TESTDIR/flagprocessorext.py
7 > EOF
7 > EOF
8 $ cd ../
8 $ cd ../
9
9
10 # Clone server and enable extensions
10 # Clone server and enable extensions
11 $ hg clone -q server client
11 $ hg clone -q server client
12 $ cd client
12 $ cd client
13 $ cat >> .hg/hgrc << EOF
13 $ cat >> .hg/hgrc << EOF
14 > [extensions]
14 > [extensions]
15 > extension=$TESTDIR/flagprocessorext.py
15 > extension=$TESTDIR/flagprocessorext.py
16 > EOF
16 > EOF
17
17
18 # Commit file that will trigger the noop extension
18 # Commit file that will trigger the noop extension
19 $ echo '[NOOP]' > noop
19 $ echo '[NOOP]' > noop
20 $ hg commit -Aqm "noop"
20 $ hg commit -Aqm "noop"
21
21
22 # Commit file that will trigger the base64 extension
22 # Commit file that will trigger the base64 extension
23 $ echo '[BASE64]' > base64
23 $ echo '[BASE64]' > base64
24 $ hg commit -Aqm 'base64'
24 $ hg commit -Aqm 'base64'
25
25
26 # Commit file that will trigger the gzip extension
26 # Commit file that will trigger the gzip extension
27 $ echo '[GZIP]' > gzip
27 $ echo '[GZIP]' > gzip
28 $ hg commit -Aqm 'gzip'
28 $ hg commit -Aqm 'gzip'
29
29
30 # Commit file that will trigger noop and base64
30 # Commit file that will trigger noop and base64
31 $ echo '[NOOP][BASE64]' > noop-base64
31 $ echo '[NOOP][BASE64]' > noop-base64
32 $ hg commit -Aqm 'noop+base64'
32 $ hg commit -Aqm 'noop+base64'
33
33
34 # Commit file that will trigger noop and gzip
34 # Commit file that will trigger noop and gzip
35 $ echo '[NOOP][GZIP]' > noop-gzip
35 $ echo '[NOOP][GZIP]' > noop-gzip
36 $ hg commit -Aqm 'noop+gzip'
36 $ hg commit -Aqm 'noop+gzip'
37
37
38 # Commit file that will trigger base64 and gzip
38 # Commit file that will trigger base64 and gzip
39 $ echo '[BASE64][GZIP]' > base64-gzip
39 $ echo '[BASE64][GZIP]' > base64-gzip
40 $ hg commit -Aqm 'base64+gzip'
40 $ hg commit -Aqm 'base64+gzip'
41
41
42 # Commit file that will trigger base64, gzip and noop
42 # Commit file that will trigger base64, gzip and noop
43 $ echo '[BASE64][GZIP][NOOP]' > base64-gzip-noop
43 $ echo '[BASE64][GZIP][NOOP]' > base64-gzip-noop
44 $ hg commit -Aqm 'base64+gzip+noop'
44 $ hg commit -Aqm 'base64+gzip+noop'
45
45
46 # TEST: ensure the revision data is consistent
46 # TEST: ensure the revision data is consistent
47 $ hg cat noop
47 $ hg cat noop
48 [NOOP]
48 [NOOP]
49 $ hg debugdata noop 0
49 $ hg debugdata noop 0
50 [NOOP]
50 [NOOP]
51
51
52 $ hg cat -r . base64
52 $ hg cat -r . base64
53 [BASE64]
53 [BASE64]
54 $ hg debugdata base64 0
54 $ hg debugdata base64 0
55 W0JBU0U2NF0K (no-eol)
55 W0JBU0U2NF0K (no-eol)
56
56
57 $ hg cat -r . gzip
57 $ hg cat -r . gzip
58 [GZIP]
58 [GZIP]
59 $ hg debugdata gzip 0
59 $ hg debugdata gzip 0
60 x\x9c\x8bv\x8f\xf2\x0c\x88\xe5\x02\x00\x08\xc8\x01\xfd (no-eol) (esc)
60 x\x9c\x8bv\x8f\xf2\x0c\x88\xe5\x02\x00\x08\xc8\x01\xfd (no-eol) (esc)
61
61
62 $ hg cat -r . noop-base64
62 $ hg cat -r . noop-base64
63 [NOOP][BASE64]
63 [NOOP][BASE64]
64 $ hg debugdata noop-base64 0
64 $ hg debugdata noop-base64 0
65 W05PT1BdW0JBU0U2NF0K (no-eol)
65 W05PT1BdW0JBU0U2NF0K (no-eol)
66
66
67 $ hg cat -r . noop-gzip
67 $ hg cat -r . noop-gzip
68 [NOOP][GZIP]
68 [NOOP][GZIP]
69 $ hg debugdata noop-gzip 0
69 $ hg debugdata noop-gzip 0
70 x\x9c\x8b\xf6\xf3\xf7\x0f\x88\x8dv\x8f\xf2\x0c\x88\xe5\x02\x00\x1dH\x03\xf1 (no-eol) (esc)
70 x\x9c\x8b\xf6\xf3\xf7\x0f\x88\x8dv\x8f\xf2\x0c\x88\xe5\x02\x00\x1dH\x03\xf1 (no-eol) (esc)
71
71
72 $ hg cat -r . base64-gzip
72 $ hg cat -r . base64-gzip
73 [BASE64][GZIP]
73 [BASE64][GZIP]
74 $ hg debugdata base64-gzip 0
74 $ hg debugdata base64-gzip 0
75 eJyLdnIMdjUziY12j/IMiOUCACLBBDo= (no-eol)
75 eJyLdnIMdjUziY12j/IMiOUCACLBBDo= (no-eol)
76
76
77 $ hg cat -r . base64-gzip-noop
77 $ hg cat -r . base64-gzip-noop
78 [BASE64][GZIP][NOOP]
78 [BASE64][GZIP][NOOP]
79 $ hg debugdata base64-gzip-noop 0
79 $ hg debugdata base64-gzip-noop 0
80 eJyLdnIMdjUziY12j/IMiI328/cPiOUCAESjBi4= (no-eol)
80 eJyLdnIMdjUziY12j/IMiI328/cPiOUCAESjBi4= (no-eol)
81
81
82 # Push to the server
82 # Push to the server
83 $ hg push
83 $ hg push
84 pushing to $TESTTMP/server
84 pushing to $TESTTMP/server
85 searching for changes
85 searching for changes
86 adding changesets
86 adding changesets
87 adding manifests
87 adding manifests
88 adding file changes
88 adding file changes
89 added 7 changesets with 7 changes to 7 files
89 added 7 changesets with 7 changes to 7 files
90
90
91 Ensure the data got to the server OK
92
93 $ cd ../server
94 $ hg cat -r 6e48f4215d24 noop
95 [NOOP]
96 $ hg debugdata noop 0
97 [NOOP]
98
99 $ hg cat -r 6e48f4215d24 base64
100 [BASE64]
101 $ hg debugdata base64 0
102 W0JBU0U2NF0K (no-eol)
103
104 $ hg cat -r 6e48f4215d24 gzip
105 [GZIP]
106 $ hg debugdata gzip 0
107 x\x9c\x8bv\x8f\xf2\x0c\x88\xe5\x02\x00\x08\xc8\x01\xfd (no-eol) (esc)
108
109 $ hg cat -r 6e48f4215d24 noop-base64
110 [NOOP][BASE64]
111 $ hg debugdata noop-base64 0
112 W05PT1BdW0JBU0U2NF0K (no-eol)
113
114 $ hg cat -r 6e48f4215d24 noop-gzip
115 [NOOP][GZIP]
116 $ hg debugdata noop-gzip 0
117 x\x9c\x8b\xf6\xf3\xf7\x0f\x88\x8dv\x8f\xf2\x0c\x88\xe5\x02\x00\x1dH\x03\xf1 (no-eol) (esc)
118
119 $ hg cat -r 6e48f4215d24 base64-gzip
120 [BASE64][GZIP]
121 $ hg debugdata base64-gzip 0
122 eJyLdnIMdjUziY12j/IMiOUCACLBBDo= (no-eol)
123
124 $ hg cat -r 6e48f4215d24 base64-gzip-noop
125 [BASE64][GZIP][NOOP]
126 $ hg debugdata base64-gzip-noop 0
127 eJyLdnIMdjUziY12j/IMiI328/cPiOUCAESjBi4= (no-eol)
128
91 # Initialize new client (not cloning) and setup extension
129 # Initialize new client (not cloning) and setup extension
92 $ cd ..
130 $ cd ..
93 $ hg init client2
131 $ hg init client2
94 $ cd client2
132 $ cd client2
95 $ cat >> .hg/hgrc << EOF
133 $ cat >> .hg/hgrc << EOF
96 > [paths]
134 > [paths]
97 > default = $TESTTMP/server
135 > default = $TESTTMP/server
98 > [extensions]
136 > [extensions]
99 > extension=$TESTDIR/flagprocessorext.py
137 > extension=$TESTDIR/flagprocessorext.py
100 > EOF
138 > EOF
101
139
102 # Pull from server and update to latest revision
140 # Pull from server and update to latest revision
103 $ hg pull default
141 $ hg pull default
104 pulling from $TESTTMP/server
142 pulling from $TESTTMP/server
105 requesting all changes
143 requesting all changes
106 adding changesets
144 adding changesets
107 adding manifests
145 adding manifests
108 adding file changes
146 adding file changes
109 added 7 changesets with 7 changes to 7 files
147 added 7 changesets with 7 changes to 7 files
110 new changesets 07b1b9442c5b:6e48f4215d24
148 new changesets 07b1b9442c5b:6e48f4215d24
111 (run 'hg update' to get a working copy)
149 (run 'hg update' to get a working copy)
112 $ hg update
150 $ hg update
113 7 files updated, 0 files merged, 0 files removed, 0 files unresolved
151 7 files updated, 0 files merged, 0 files removed, 0 files unresolved
114
152
115 # TEST: ensure the revision data is consistent
153 # TEST: ensure the revision data is consistent
116 $ hg cat noop
154 $ hg cat noop
117 [NOOP]
155 [NOOP]
118 $ hg debugdata noop 0
156 $ hg debugdata noop 0
119 [NOOP]
157 [NOOP]
120
158
121 $ hg cat -r . base64
159 $ hg cat -r . base64
122 [BASE64]
160 [BASE64]
123 $ hg debugdata base64 0
161 $ hg debugdata base64 0
124 W0JBU0U2NF0K (no-eol)
162 W0JBU0U2NF0K (no-eol)
125
163
126 $ hg cat -r . gzip
164 $ hg cat -r . gzip
127 [GZIP]
165 [GZIP]
128 $ hg debugdata gzip 0
166 $ hg debugdata gzip 0
129 x\x9c\x8bv\x8f\xf2\x0c\x88\xe5\x02\x00\x08\xc8\x01\xfd (no-eol) (esc)
167 x\x9c\x8bv\x8f\xf2\x0c\x88\xe5\x02\x00\x08\xc8\x01\xfd (no-eol) (esc)
130
168
131 $ hg cat -r . noop-base64
169 $ hg cat -r . noop-base64
132 [NOOP][BASE64]
170 [NOOP][BASE64]
133 $ hg debugdata noop-base64 0
171 $ hg debugdata noop-base64 0
134 W05PT1BdW0JBU0U2NF0K (no-eol)
172 W05PT1BdW0JBU0U2NF0K (no-eol)
135
173
136 $ hg cat -r . noop-gzip
174 $ hg cat -r . noop-gzip
137 [NOOP][GZIP]
175 [NOOP][GZIP]
138 $ hg debugdata noop-gzip 0
176 $ hg debugdata noop-gzip 0
139 x\x9c\x8b\xf6\xf3\xf7\x0f\x88\x8dv\x8f\xf2\x0c\x88\xe5\x02\x00\x1dH\x03\xf1 (no-eol) (esc)
177 x\x9c\x8b\xf6\xf3\xf7\x0f\x88\x8dv\x8f\xf2\x0c\x88\xe5\x02\x00\x1dH\x03\xf1 (no-eol) (esc)
140
178
141 $ hg cat -r . base64-gzip
179 $ hg cat -r . base64-gzip
142 [BASE64][GZIP]
180 [BASE64][GZIP]
143 $ hg debugdata base64-gzip 0
181 $ hg debugdata base64-gzip 0
144 eJyLdnIMdjUziY12j/IMiOUCACLBBDo= (no-eol)
182 eJyLdnIMdjUziY12j/IMiOUCACLBBDo= (no-eol)
145
183
146 $ hg cat -r . base64-gzip-noop
184 $ hg cat -r . base64-gzip-noop
147 [BASE64][GZIP][NOOP]
185 [BASE64][GZIP][NOOP]
148 $ hg debugdata base64-gzip-noop 0
186 $ hg debugdata base64-gzip-noop 0
149 eJyLdnIMdjUziY12j/IMiI328/cPiOUCAESjBi4= (no-eol)
187 eJyLdnIMdjUziY12j/IMiI328/cPiOUCAESjBi4= (no-eol)
150
188
151 # TEST: ensure a missing processor is handled
189 # TEST: ensure a missing processor is handled
152 $ echo '[FAIL][BASE64][GZIP][NOOP]' > fail-base64-gzip-noop
190 $ echo '[FAIL][BASE64][GZIP][NOOP]' > fail-base64-gzip-noop
153 $ hg commit -Aqm 'fail+base64+gzip+noop'
191 $ hg commit -Aqm 'fail+base64+gzip+noop'
154 abort: missing processor for flag '0x1'!
192 abort: missing processor for flag '0x1'!
155 [255]
193 [255]
156 $ rm fail-base64-gzip-noop
194 $ rm fail-base64-gzip-noop
157
195
158 # TEST: ensure we cannot register several flag processors on the same flag
196 # TEST: ensure we cannot register several flag processors on the same flag
159 $ cat >> .hg/hgrc << EOF
197 $ cat >> .hg/hgrc << EOF
160 > [extensions]
198 > [extensions]
161 > extension=$TESTDIR/flagprocessorext.py
199 > extension=$TESTDIR/flagprocessorext.py
162 > duplicate=$TESTDIR/flagprocessorext.py
200 > duplicate=$TESTDIR/flagprocessorext.py
163 > EOF
201 > EOF
164 $ hg debugrebuilddirstate
202 $ hg debugrebuilddirstate
165 Traceback (most recent call last):
203 Traceback (most recent call last):
166 File "*/mercurial/extensions.py", line *, in _runextsetup (glob)
204 File "*/mercurial/extensions.py", line *, in _runextsetup (glob)
167 extsetup(ui)
205 extsetup(ui)
168 File "*/tests/flagprocessorext.py", line *, in extsetup (glob)
206 File "*/tests/flagprocessorext.py", line *, in extsetup (glob)
169 validatehash,
207 validatehash,
170 File "*/mercurial/revlog.py", line *, in addflagprocessor (glob)
208 File "*/mercurial/revlog.py", line *, in addflagprocessor (glob)
171 raise error.Abort(msg)
209 raise error.Abort(msg)
172 Abort: cannot register multiple processors on flag '0x8'.
210 Abort: cannot register multiple processors on flag '0x8'.
173 *** failed to set up extension duplicate: cannot register multiple processors on flag '0x8'.
211 *** failed to set up extension duplicate: cannot register multiple processors on flag '0x8'.
174 $ hg st 2>&1 | egrep 'cannot register multiple processors|flagprocessorext'
212 $ hg st 2>&1 | egrep 'cannot register multiple processors|flagprocessorext'
175 File "*/tests/flagprocessorext.py", line *, in extsetup (glob)
213 File "*/tests/flagprocessorext.py", line *, in extsetup (glob)
176 Abort: cannot register multiple processors on flag '0x8'.
214 Abort: cannot register multiple processors on flag '0x8'.
177 *** failed to set up extension duplicate: cannot register multiple processors on flag '0x8'.
215 *** failed to set up extension duplicate: cannot register multiple processors on flag '0x8'.
178 File "*/tests/flagprocessorext.py", line *, in b64decode (glob)
216 File "*/tests/flagprocessorext.py", line *, in b64decode (glob)
179
217
180 $ cd ..
218 $ cd ..
181
219
182 # TEST: bundle repo
220 # TEST: bundle repo
183 $ hg init bundletest
221 $ hg init bundletest
184 $ cd bundletest
222 $ cd bundletest
185
223
186 $ cat >> .hg/hgrc << EOF
224 $ cat >> .hg/hgrc << EOF
187 > [extensions]
225 > [extensions]
188 > flagprocessor=$TESTDIR/flagprocessorext.py
226 > flagprocessor=$TESTDIR/flagprocessorext.py
189 > EOF
227 > EOF
190
228
191 $ for i in 0 single two three 4; do
229 $ for i in 0 single two three 4; do
192 > echo '[BASE64]a-bit-longer-'$i > base64
230 > echo '[BASE64]a-bit-longer-'$i > base64
193 > hg commit -m base64-$i -A base64
231 > hg commit -m base64-$i -A base64
194 > done
232 > done
195
233
196 $ hg update 2 -q
234 $ hg update 2 -q
197 $ echo '[BASE64]a-bit-longer-branching' > base64
235 $ echo '[BASE64]a-bit-longer-branching' > base64
198 $ hg commit -q -m branching
236 $ hg commit -q -m branching
199
237
238 #if repobundlerepo
200 $ hg bundle --base 1 bundle.hg
239 $ hg bundle --base 1 bundle.hg
201 4 changesets found
240 4 changesets found
202 $ hg --config extensions.strip= strip -r 2 --no-backup --force -q
241 $ hg --config extensions.strip= strip -r 2 --no-backup --force -q
203 $ hg -R bundle.hg log --stat -T '{rev} {desc}\n' base64
242 $ hg -R bundle.hg log --stat -T '{rev} {desc}\n' base64
204 5 branching
243 5 branching
205 base64 | 2 +-
244 base64 | 2 +-
206 1 files changed, 1 insertions(+), 1 deletions(-)
245 1 files changed, 1 insertions(+), 1 deletions(-)
207
246
208 4 base64-4
247 4 base64-4
209 base64 | 2 +-
248 base64 | 2 +-
210 1 files changed, 1 insertions(+), 1 deletions(-)
249 1 files changed, 1 insertions(+), 1 deletions(-)
211
250
212 3 base64-three
251 3 base64-three
213 base64 | 2 +-
252 base64 | 2 +-
214 1 files changed, 1 insertions(+), 1 deletions(-)
253 1 files changed, 1 insertions(+), 1 deletions(-)
215
254
216 2 base64-two
255 2 base64-two
217 base64 | 2 +-
256 base64 | 2 +-
218 1 files changed, 1 insertions(+), 1 deletions(-)
257 1 files changed, 1 insertions(+), 1 deletions(-)
219
258
220 1 base64-single
259 1 base64-single
221 base64 | 2 +-
260 base64 | 2 +-
222 1 files changed, 1 insertions(+), 1 deletions(-)
261 1 files changed, 1 insertions(+), 1 deletions(-)
223
262
224 0 base64-0
263 0 base64-0
225 base64 | 1 +
264 base64 | 1 +
226 1 files changed, 1 insertions(+), 0 deletions(-)
265 1 files changed, 1 insertions(+), 0 deletions(-)
227
266
228
267
229 $ hg bundle -R bundle.hg --base 1 bundle-again.hg -q
268 $ hg bundle -R bundle.hg --base 1 bundle-again.hg -q
230 $ hg -R bundle-again.hg log --stat -T '{rev} {desc}\n' base64
269 $ hg -R bundle-again.hg log --stat -T '{rev} {desc}\n' base64
231 5 branching
270 5 branching
232 base64 | 2 +-
271 base64 | 2 +-
233 1 files changed, 1 insertions(+), 1 deletions(-)
272 1 files changed, 1 insertions(+), 1 deletions(-)
234
273
235 4 base64-4
274 4 base64-4
236 base64 | 2 +-
275 base64 | 2 +-
237 1 files changed, 1 insertions(+), 1 deletions(-)
276 1 files changed, 1 insertions(+), 1 deletions(-)
238
277
239 3 base64-three
278 3 base64-three
240 base64 | 2 +-
279 base64 | 2 +-
241 1 files changed, 1 insertions(+), 1 deletions(-)
280 1 files changed, 1 insertions(+), 1 deletions(-)
242
281
243 2 base64-two
282 2 base64-two
244 base64 | 2 +-
283 base64 | 2 +-
245 1 files changed, 1 insertions(+), 1 deletions(-)
284 1 files changed, 1 insertions(+), 1 deletions(-)
246
285
247 1 base64-single
286 1 base64-single
248 base64 | 2 +-
287 base64 | 2 +-
249 1 files changed, 1 insertions(+), 1 deletions(-)
288 1 files changed, 1 insertions(+), 1 deletions(-)
250
289
251 0 base64-0
290 0 base64-0
252 base64 | 1 +
291 base64 | 1 +
253 1 files changed, 1 insertions(+), 0 deletions(-)
292 1 files changed, 1 insertions(+), 0 deletions(-)
254
293
255 $ rm bundle.hg bundle-again.hg
294 $ rm bundle.hg bundle-again.hg
295 #endif
256
296
257 # TEST: hg status
297 # TEST: hg status
258
298
259 $ hg status
299 $ hg status
260 $ hg diff
300 $ hg diff
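Note on the test above: hg cat prints the fulltext recovered by the read transforms, while hg debugdata prints the rawtext as stored, so the two outputs differ exactly by the registered encodings. The expected values can be cross-checked with the standard library; this is a standalone sketch whose byte strings are copied from the test expectations.

import base64
import zlib

assert base64.b64decode(b'W0JBU0U2NF0K') == b'[BASE64]\n'
assert zlib.decompress(base64.b64decode(
    b'eJyLdnIMdjUziY12j/IMiOUCACLBBDo=')) == b'[BASE64][GZIP]\n'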