wireprotov2: support response caching...
Gregory Szorc
r40057:c537144f default
@@ -0,0 +1,645
1 $ . $TESTDIR/wireprotohelpers.sh
2 $ cat >> $HGRCPATH << EOF
3 > [extensions]
4 > blackbox =
5 > [blackbox]
6 > track = simplecache
7 > EOF
8 $ hg init server
9 $ enablehttpv2 server
10 $ cd server
11 $ cat >> .hg/hgrc << EOF
12 > [extensions]
13 > simplecache = $TESTDIR/wireprotosimplecache.py
14 > EOF
15
16 $ echo a0 > a
17 $ echo b0 > b
18 $ hg -q commit -A -m 'commit 0'
19 $ echo a1 > a
20 $ hg commit -m 'commit 1'
21 $ echo b1 > b
22 $ hg commit -m 'commit 2'
23 $ echo a2 > a
24 $ echo b2 > b
25 $ hg commit -m 'commit 3'
26
27 $ hg log -G -T '{rev}:{node} {desc}'
28 @ 3:50590a86f3ff5d1e9a1624a7a6957884565cc8e8 commit 3
29 |
30 o 2:4d01eda50c6ac5f7e89cbe1880143a32f559c302 commit 2
31 |
32 o 1:4432d83626e8a98655f062ec1f2a43b07f7fbbb0 commit 1
33 |
34 o 0:3390ef850073fbc2f0dfff2244342c8e9229013a commit 0
35
36
37 $ hg --debug debugindex -m
38 rev linkrev nodeid p1 p2
39 0 0 992f4779029a3df8d0666d00bb924f69634e2641 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000
40 1 1 a988fb43583e871d1ed5750ee074c6d840bbbfc8 992f4779029a3df8d0666d00bb924f69634e2641 0000000000000000000000000000000000000000
41 2 2 a8853dafacfca6fc807055a660d8b835141a3bb4 a988fb43583e871d1ed5750ee074c6d840bbbfc8 0000000000000000000000000000000000000000
42 3 3 3fe11dfbb13645782b0addafbe75a87c210ffddc a8853dafacfca6fc807055a660d8b835141a3bb4 0000000000000000000000000000000000000000
43
44 $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log
45 $ cat hg.pid > $DAEMON_PIDS
46
47 Performing the same request should result in the same response, with the 2nd
48 response coming from the cache.
49
50 $ sendhttpv2peer << EOF
51 > command manifestdata
52 > nodes eval:[b'\x99\x2f\x47\x79\x02\x9a\x3d\xf8\xd0\x66\x6d\x00\xbb\x92\x4f\x69\x63\x4e\x26\x41']
53 > tree eval:b''
54 > fields eval:[b'parents']
55 > EOF
56 creating http peer for wire protocol version 2
57 sending manifestdata command
58 s> POST /api/exp-http-v2-0002/ro/manifestdata HTTP/1.1\r\n
59 s> Accept-Encoding: identity\r\n
60 s> accept: application/mercurial-exp-framing-0005\r\n
61 s> content-type: application/mercurial-exp-framing-0005\r\n
62 s> content-length: 83\r\n
63 s> host: $LOCALIP:$HGPORT\r\n (glob)
64 s> user-agent: Mercurial debugwireproto\r\n
65 s> \r\n
66 s> K\x00\x00\x01\x00\x01\x01\x11\xa2Dargs\xa3Ffields\x81GparentsEnodes\x81T\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&ADtree@DnameLmanifestdata
67 s> makefile('rb', None)
68 s> HTTP/1.1 200 OK\r\n
69 s> Server: testing stub value\r\n
70 s> Date: $HTTP_DATE$\r\n
71 s> Content-Type: application/mercurial-exp-framing-0005\r\n
72 s> Transfer-Encoding: chunked\r\n
73 s> \r\n
74 s> 13\r\n
75 s> \x0b\x00\x00\x01\x00\x02\x011
76 s> \xa1FstatusBok
77 s> \r\n
78 received frame(size=11; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=continuation)
79 s> 63\r\n
80 s> [\x00\x00\x01\x00\x02\x001
81 s> \xa1Jtotalitems\x01\xa2DnodeT\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&AGparents\x82T\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00T\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00
82 s> \r\n
83 received frame(size=91; request=1; stream=2; streamflags=; type=command-response; flags=continuation)
84 s> 8\r\n
85 s> \x00\x00\x00\x01\x00\x02\x002
86 s> \r\n
87 s> 0\r\n
88 s> \r\n
89 received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
90 response: gen[
91 {
92 b'totalitems': 1
93 },
94 {
95 b'node': b'\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A',
96 b'parents': [
97 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
98 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
99 ]
100 }
101 ]
102
103 $ sendhttpv2peer << EOF
104 > command manifestdata
105 > nodes eval:[b'\x99\x2f\x47\x79\x02\x9a\x3d\xf8\xd0\x66\x6d\x00\xbb\x92\x4f\x69\x63\x4e\x26\x41']
106 > tree eval:b''
107 > fields eval:[b'parents']
108 > EOF
109 creating http peer for wire protocol version 2
110 sending manifestdata command
111 s> POST /api/exp-http-v2-0002/ro/manifestdata HTTP/1.1\r\n
112 s> Accept-Encoding: identity\r\n
113 s> accept: application/mercurial-exp-framing-0005\r\n
114 s> content-type: application/mercurial-exp-framing-0005\r\n
115 s> content-length: 83\r\n
116 s> host: $LOCALIP:$HGPORT\r\n (glob)
117 s> user-agent: Mercurial debugwireproto\r\n
118 s> \r\n
119 s> K\x00\x00\x01\x00\x01\x01\x11\xa2Dargs\xa3Ffields\x81GparentsEnodes\x81T\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&ADtree@DnameLmanifestdata
120 s> makefile('rb', None)
121 s> HTTP/1.1 200 OK\r\n
122 s> Server: testing stub value\r\n
123 s> Date: $HTTP_DATE$\r\n
124 s> Content-Type: application/mercurial-exp-framing-0005\r\n
125 s> Transfer-Encoding: chunked\r\n
126 s> \r\n
127 s> 13\r\n
128 s> \x0b\x00\x00\x01\x00\x02\x011
129 s> \xa1FstatusBok
130 s> \r\n
131 received frame(size=11; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=continuation)
132 s> 63\r\n
133 s> [\x00\x00\x01\x00\x02\x001
134 s> \xa1Jtotalitems\x01\xa2DnodeT\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&AGparents\x82T\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00T\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00
135 s> \r\n
136 received frame(size=91; request=1; stream=2; streamflags=; type=command-response; flags=continuation)
137 s> 8\r\n
138 s> \x00\x00\x00\x01\x00\x02\x002
139 s> \r\n
140 s> 0\r\n
141 s> \r\n
142 received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
143 response: gen[
144 {
145 b'totalitems': 1
146 },
147 {
148 b'node': b'\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A',
149 b'parents': [
150 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
151 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
152 ]
153 }
154 ]
155
156 Sending a different request doesn't yield a cache hit.
157
158 $ sendhttpv2peer << EOF
159 > command manifestdata
160 > nodes eval:[b'\x99\x2f\x47\x79\x02\x9a\x3d\xf8\xd0\x66\x6d\x00\xbb\x92\x4f\x69\x63\x4e\x26\x41', b'\xa9\x88\xfb\x43\x58\x3e\x87\x1d\x1e\xd5\x75\x0e\xe0\x74\xc6\xd8\x40\xbb\xbf\xc8']
161 > tree eval:b''
162 > fields eval:[b'parents']
163 > EOF
164 creating http peer for wire protocol version 2
165 sending manifestdata command
166 s> POST /api/exp-http-v2-0002/ro/manifestdata HTTP/1.1\r\n
167 s> Accept-Encoding: identity\r\n
168 s> accept: application/mercurial-exp-framing-0005\r\n
169 s> content-type: application/mercurial-exp-framing-0005\r\n
170 s> content-length: 104\r\n
171 s> host: $LOCALIP:$HGPORT\r\n (glob)
172 s> user-agent: Mercurial debugwireproto\r\n
173 s> \r\n
174 s> `\x00\x00\x01\x00\x01\x01\x11\xa2Dargs\xa3Ffields\x81GparentsEnodes\x82T\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&AT\xa9\x88\xfbCX>\x87\x1d\x1e\xd5u\x0e\xe0t\xc6\xd8@\xbb\xbf\xc8Dtree@DnameLmanifestdata
175 s> makefile('rb', None)
176 s> HTTP/1.1 200 OK\r\n
177 s> Server: testing stub value\r\n
178 s> Date: $HTTP_DATE$\r\n
179 s> Content-Type: application/mercurial-exp-framing-0005\r\n
180 s> Transfer-Encoding: chunked\r\n
181 s> \r\n
182 s> 13\r\n
183 s> \x0b\x00\x00\x01\x00\x02\x011
184 s> \xa1FstatusBok
185 s> \r\n
186 received frame(size=11; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=continuation)
187 s> b1\r\n
188 s> \xa9\x00\x00\x01\x00\x02\x001
189 s> \xa1Jtotalitems\x02\xa2DnodeT\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&AGparents\x82T\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00T\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa2DnodeT\xa9\x88\xfbCX>\x87\x1d\x1e\xd5u\x0e\xe0t\xc6\xd8@\xbb\xbf\xc8Gparents\x82T\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&AT\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00
190 s> \r\n
191 received frame(size=169; request=1; stream=2; streamflags=; type=command-response; flags=continuation)
192 s> 8\r\n
193 s> \x00\x00\x00\x01\x00\x02\x002
194 s> \r\n
195 s> 0\r\n
196 s> \r\n
197 received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
198 response: gen[
199 {
200 b'totalitems': 2
201 },
202 {
203 b'node': b'\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A',
204 b'parents': [
205 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
206 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
207 ]
208 },
209 {
210 b'node': b'\xa9\x88\xfbCX>\x87\x1d\x1e\xd5u\x0e\xe0t\xc6\xd8@\xbb\xbf\xc8',
211 b'parents': [
212 b'\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A',
213 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
214 ]
215 }
216 ]
217
218 $ cat .hg/blackbox.log
219 *> cacher constructed for manifestdata (glob)
220 *> cache miss for c045a581599d58608efd3d93d8129841f2af04a0 (glob)
221 *> storing cache entry for c045a581599d58608efd3d93d8129841f2af04a0 (glob)
222 *> cacher constructed for manifestdata (glob)
223 *> cache hit for c045a581599d58608efd3d93d8129841f2af04a0 (glob)
224 *> cacher constructed for manifestdata (glob)
225 *> cache miss for 6ed2f740a1cdd12c9e99c4f27695543143c26a11 (glob)
226 *> storing cache entry for 6ed2f740a1cdd12c9e99c4f27695543143c26a11 (glob)
227
228 $ cat error.log
229
230 $ killdaemons.py
231 $ rm .hg/blackbox.log
232
233 Try with object caching mode
234
235 $ cat >> .hg/hgrc << EOF
236 > [simplecache]
237 > cacheobjects = true
238 > EOF
239
240 $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log
241 $ cat hg.pid > $DAEMON_PIDS
242
243 $ sendhttpv2peer << EOF
244 > command manifestdata
245 > nodes eval:[b'\x99\x2f\x47\x79\x02\x9a\x3d\xf8\xd0\x66\x6d\x00\xbb\x92\x4f\x69\x63\x4e\x26\x41']
246 > tree eval:b''
247 > fields eval:[b'parents']
248 > EOF
249 creating http peer for wire protocol version 2
250 sending manifestdata command
251 s> POST /api/exp-http-v2-0002/ro/manifestdata HTTP/1.1\r\n
252 s> Accept-Encoding: identity\r\n
253 s> accept: application/mercurial-exp-framing-0005\r\n
254 s> content-type: application/mercurial-exp-framing-0005\r\n
255 s> content-length: 83\r\n
256 s> host: $LOCALIP:$HGPORT\r\n (glob)
257 s> user-agent: Mercurial debugwireproto\r\n
258 s> \r\n
259 s> K\x00\x00\x01\x00\x01\x01\x11\xa2Dargs\xa3Ffields\x81GparentsEnodes\x81T\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&ADtree@DnameLmanifestdata
260 s> makefile('rb', None)
261 s> HTTP/1.1 200 OK\r\n
262 s> Server: testing stub value\r\n
263 s> Date: $HTTP_DATE$\r\n
264 s> Content-Type: application/mercurial-exp-framing-0005\r\n
265 s> Transfer-Encoding: chunked\r\n
266 s> \r\n
267 s> 13\r\n
268 s> \x0b\x00\x00\x01\x00\x02\x011
269 s> \xa1FstatusBok
270 s> \r\n
271 received frame(size=11; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=continuation)
272 s> 63\r\n
273 s> [\x00\x00\x01\x00\x02\x001
274 s> \xa1Jtotalitems\x01\xa2DnodeT\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&AGparents\x82T\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00T\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00
275 s> \r\n
276 received frame(size=91; request=1; stream=2; streamflags=; type=command-response; flags=continuation)
277 s> 8\r\n
278 s> \x00\x00\x00\x01\x00\x02\x002
279 s> \r\n
280 s> 0\r\n
281 s> \r\n
282 received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
283 response: gen[
284 {
285 b'totalitems': 1
286 },
287 {
288 b'node': b'\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A',
289 b'parents': [
290 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
291 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
292 ]
293 }
294 ]
295
296 $ sendhttpv2peer << EOF
297 > command manifestdata
298 > nodes eval:[b'\x99\x2f\x47\x79\x02\x9a\x3d\xf8\xd0\x66\x6d\x00\xbb\x92\x4f\x69\x63\x4e\x26\x41']
299 > tree eval:b''
300 > fields eval:[b'parents']
301 > EOF
302 creating http peer for wire protocol version 2
303 sending manifestdata command
304 s> POST /api/exp-http-v2-0002/ro/manifestdata HTTP/1.1\r\n
305 s> Accept-Encoding: identity\r\n
306 s> accept: application/mercurial-exp-framing-0005\r\n
307 s> content-type: application/mercurial-exp-framing-0005\r\n
308 s> content-length: 83\r\n
309 s> host: $LOCALIP:$HGPORT\r\n (glob)
310 s> user-agent: Mercurial debugwireproto\r\n
311 s> \r\n
312 s> K\x00\x00\x01\x00\x01\x01\x11\xa2Dargs\xa3Ffields\x81GparentsEnodes\x81T\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&ADtree@DnameLmanifestdata
313 s> makefile('rb', None)
314 s> HTTP/1.1 200 OK\r\n
315 s> Server: testing stub value\r\n
316 s> Date: $HTTP_DATE$\r\n
317 s> Content-Type: application/mercurial-exp-framing-0005\r\n
318 s> Transfer-Encoding: chunked\r\n
319 s> \r\n
320 s> 13\r\n
321 s> \x0b\x00\x00\x01\x00\x02\x011
322 s> \xa1FstatusBok
323 s> \r\n
324 received frame(size=11; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=continuation)
325 s> 63\r\n
326 s> [\x00\x00\x01\x00\x02\x001
327 s> \xa1Jtotalitems\x01\xa2DnodeT\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&AGparents\x82T\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00T\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00
328 s> \r\n
329 received frame(size=91; request=1; stream=2; streamflags=; type=command-response; flags=continuation)
330 s> 8\r\n
331 s> \x00\x00\x00\x01\x00\x02\x002
332 s> \r\n
333 s> 0\r\n
334 s> \r\n
335 received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
336 response: gen[
337 {
338 b'totalitems': 1
339 },
340 {
341 b'node': b'\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A',
342 b'parents': [
343 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
344 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
345 ]
346 }
347 ]
348
349 $ cat .hg/blackbox.log
350 *> cacher constructed for manifestdata (glob)
351 *> cache miss for c045a581599d58608efd3d93d8129841f2af04a0 (glob)
352 *> storing cache entry for c045a581599d58608efd3d93d8129841f2af04a0 (glob)
353 *> cacher constructed for manifestdata (glob)
354 *> cache hit for c045a581599d58608efd3d93d8129841f2af04a0 (glob)
355
356 $ cat error.log
357
358 $ killdaemons.py
359 $ rm .hg/blackbox.log
360
361 A non-cacheable command does not instantiate the cacher
362
363 $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log
364 $ cat hg.pid > $DAEMON_PIDS
365 $ sendhttpv2peer << EOF
366 > command capabilities
367 > EOF
368 creating http peer for wire protocol version 2
369 sending capabilities command
370 s> POST /api/exp-http-v2-0002/ro/capabilities HTTP/1.1\r\n
371 s> Accept-Encoding: identity\r\n
372 s> accept: application/mercurial-exp-framing-0005\r\n
373 s> content-type: application/mercurial-exp-framing-0005\r\n
374 s> content-length: 27\r\n
375 s> host: $LOCALIP:$HGPORT\r\n (glob)
376 s> user-agent: Mercurial debugwireproto\r\n
377 s> \r\n
378 s> \x13\x00\x00\x01\x00\x01\x01\x11\xa1DnameLcapabilities
379 s> makefile('rb', None)
380 s> HTTP/1.1 200 OK\r\n
381 s> Server: testing stub value\r\n
382 s> Date: $HTTP_DATE$\r\n
383 s> Content-Type: application/mercurial-exp-framing-0005\r\n
384 s> Transfer-Encoding: chunked\r\n
385 s> \r\n
386 s> 13\r\n
387 s> \x0b\x00\x00\x01\x00\x02\x011
388 s> \xa1FstatusBok
389 s> \r\n
390 received frame(size=11; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=continuation)
391 s> 52b\r\n
392 s> #\x05\x00\x01\x00\x02\x001
393 s> \xa5Hcommands\xaaIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionInoderange\xa3Gdefault\xf6Hrequired\xf4DtypeDlistEnodes\xa3Gdefault\xf6Hrequired\xf4DtypeDlistJnodesdepth\xa3Gdefault\xf6Hrequired\xf4DtypeCintKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullGpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushKcompression\x82\xa1DnameDzstd\xa1DnameDzlibQframingmediatypes\x81X&application/mercurial-exp-framing-0005Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x82LgeneraldeltaHrevlogv1
394 s> \r\n
395 received frame(size=1315; request=1; stream=2; streamflags=; type=command-response; flags=continuation)
396 s> 8\r\n
397 s> \x00\x00\x00\x01\x00\x02\x002
398 s> \r\n
399 s> 0\r\n
400 s> \r\n
401 received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
402 response: gen[
403 {
404 b'commands': {
405 b'branchmap': {
406 b'args': {},
407 b'permissions': [
408 b'pull'
409 ]
410 },
411 b'capabilities': {
412 b'args': {},
413 b'permissions': [
414 b'pull'
415 ]
416 },
417 b'changesetdata': {
418 b'args': {
419 b'fields': {
420 b'default': set([]),
421 b'required': False,
422 b'type': b'set',
423 b'validvalues': set([
424 b'bookmarks',
425 b'parents',
426 b'phase',
427 b'revision'
428 ])
429 },
430 b'noderange': {
431 b'default': None,
432 b'required': False,
433 b'type': b'list'
434 },
435 b'nodes': {
436 b'default': None,
437 b'required': False,
438 b'type': b'list'
439 },
440 b'nodesdepth': {
441 b'default': None,
442 b'required': False,
443 b'type': b'int'
444 }
445 },
446 b'permissions': [
447 b'pull'
448 ]
449 },
450 b'filedata': {
451 b'args': {
452 b'fields': {
453 b'default': set([]),
454 b'required': False,
455 b'type': b'set',
456 b'validvalues': set([
457 b'parents',
458 b'revision'
459 ])
460 },
461 b'haveparents': {
462 b'default': False,
463 b'required': False,
464 b'type': b'bool'
465 },
466 b'nodes': {
467 b'required': True,
468 b'type': b'list'
469 },
470 b'path': {
471 b'required': True,
472 b'type': b'bytes'
473 }
474 },
475 b'permissions': [
476 b'pull'
477 ]
478 },
479 b'heads': {
480 b'args': {
481 b'publiconly': {
482 b'default': False,
483 b'required': False,
484 b'type': b'bool'
485 }
486 },
487 b'permissions': [
488 b'pull'
489 ]
490 },
491 b'known': {
492 b'args': {
493 b'nodes': {
494 b'default': [],
495 b'required': False,
496 b'type': b'list'
497 }
498 },
499 b'permissions': [
500 b'pull'
501 ]
502 },
503 b'listkeys': {
504 b'args': {
505 b'namespace': {
506 b'required': True,
507 b'type': b'bytes'
508 }
509 },
510 b'permissions': [
511 b'pull'
512 ]
513 },
514 b'lookup': {
515 b'args': {
516 b'key': {
517 b'required': True,
518 b'type': b'bytes'
519 }
520 },
521 b'permissions': [
522 b'pull'
523 ]
524 },
525 b'manifestdata': {
526 b'args': {
527 b'fields': {
528 b'default': set([]),
529 b'required': False,
530 b'type': b'set',
531 b'validvalues': set([
532 b'parents',
533 b'revision'
534 ])
535 },
536 b'haveparents': {
537 b'default': False,
538 b'required': False,
539 b'type': b'bool'
540 },
541 b'nodes': {
542 b'required': True,
543 b'type': b'list'
544 },
545 b'tree': {
546 b'required': True,
547 b'type': b'bytes'
548 }
549 },
550 b'permissions': [
551 b'pull'
552 ]
553 },
554 b'pushkey': {
555 b'args': {
556 b'key': {
557 b'required': True,
558 b'type': b'bytes'
559 },
560 b'namespace': {
561 b'required': True,
562 b'type': b'bytes'
563 },
564 b'new': {
565 b'required': True,
566 b'type': b'bytes'
567 },
568 b'old': {
569 b'required': True,
570 b'type': b'bytes'
571 }
572 },
573 b'permissions': [
574 b'push'
575 ]
576 }
577 },
578 b'compression': [
579 {
580 b'name': b'zstd'
581 },
582 {
583 b'name': b'zlib'
584 }
585 ],
586 b'framingmediatypes': [
587 b'application/mercurial-exp-framing-0005'
588 ],
589 b'pathfilterprefixes': set([
590 b'path:',
591 b'rootfilesin:'
592 ]),
593 b'rawrepoformats': [
594 b'generaldelta',
595 b'revlogv1'
596 ]
597 }
598 ]
599
600 $ test -f .hg/blackbox.log
601 [1]
602
603 An error is not cached
604
605 $ sendhttpv2peer << EOF
606 > command manifestdata
607 > nodes eval:[b'\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa']
608 > tree eval:b''
609 > fields eval:[b'parents']
610 > EOF
611 creating http peer for wire protocol version 2
612 sending manifestdata command
613 s> POST /api/exp-http-v2-0002/ro/manifestdata HTTP/1.1\r\n
614 s> Accept-Encoding: identity\r\n
615 s> accept: application/mercurial-exp-framing-0005\r\n
616 s> content-type: application/mercurial-exp-framing-0005\r\n
617 s> content-length: 83\r\n
618 s> host: $LOCALIP:$HGPORT\r\n (glob)
619 s> user-agent: Mercurial debugwireproto\r\n
620 s> \r\n
621 s> K\x00\x00\x01\x00\x01\x01\x11\xa2Dargs\xa3Ffields\x81GparentsEnodes\x81T\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaaDtree@DnameLmanifestdata
622 s> makefile('rb', None)
623 s> HTTP/1.1 200 OK\r\n
624 s> Server: testing stub value\r\n
625 s> Date: $HTTP_DATE$\r\n
626 s> Content-Type: application/mercurial-exp-framing-0005\r\n
627 s> Transfer-Encoding: chunked\r\n
628 s> \r\n
629 s> 51\r\n
630 s> I\x00\x00\x01\x00\x02\x012
631 s> \xa2Eerror\xa2Dargs\x81T\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaaGmessagePunknown node: %sFstatusEerror
632 s> \r\n
633 received frame(size=73; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=eos)
634 s> 0\r\n
635 s> \r\n
636 abort: unknown node: \xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa! (esc)
637 [255]
638
639 $ cat .hg/blackbox.log
640 *> cacher constructed for manifestdata (glob)
641 *> cache miss for 9d1bb421d99e913d45f2d099aa49728514292dd2 (glob)
642 *> cacher exiting due to error (glob)
643
644 $ killdaemons.py
645 $ rm .hg/blackbox.log
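
The blackbox logs above show identical requests hashing to the same 40-character key while a different request hashes to a new one. A minimal sketch of how such a key could be derived on the server side (a hypothetical helper; the real derivation lives in wireprotov2server and includes additional state such as the repo object, which the test extension strips via adjustcachekeystate()):

    import hashlib

    def makecachekey(command, args):
        # Hash the command name plus a deterministic rendering of its
        # arguments so identical requests map to the same cache entry.
        h = hashlib.sha1()
        h.update(command)
        for name in sorted(args):
            h.update(name)
            h.update(repr(args[name]).encode('ascii'))
        return h.hexdigest()

    # Two calls with the same command/args yield the same key; adding a node
    # to 'nodes' (as in the third request above) yields a different key.
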
@@ -0,0 +1,100
1 # wireprotosimplecache.py - Extension providing in-memory wire protocol cache
2 #
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
4 #
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
7
8 from __future__ import absolute_import
9
10 from mercurial import (
11 extensions,
12 registrar,
13 repository,
14 util,
15 wireprototypes,
16 wireprotov2server,
17 )
18 from mercurial.utils import (
19 interfaceutil,
20 )
21
22 CACHE = None
23
24 configtable = {}
25 configitem = registrar.configitem(configtable)
26
27 configitem('simplecache', 'cacheobjects',
28 default=False)
29
30 @interfaceutil.implementer(repository.iwireprotocolcommandcacher)
31 class memorycacher(object):
32 def __init__(self, ui, command, encodefn):
33 self.ui = ui
34 self.encodefn = encodefn
35 self.key = None
36 self.cacheobjects = ui.configbool('simplecache', 'cacheobjects')
37 self.buffered = []
38
39 ui.log('simplecache', 'cacher constructed for %s\n', command)
40
41 def __enter__(self):
42 return self
43
44 def __exit__(self, exctype, excvalue, exctb):
45 if exctype:
46 self.ui.log('simplecache', 'cacher exiting due to error\n')
47
48 def adjustcachekeystate(self, state):
49 # Needed in order to make tests deterministic. Don't copy this
50 # pattern for production caches!
51 del state[b'repo']
52
53 def setcachekey(self, key):
54 self.key = key
55 return True
56
57 def lookup(self):
58 if self.key not in CACHE:
59 self.ui.log('simplecache', 'cache miss for %s\n', self.key)
60 return None
61
62 entry = CACHE[self.key]
63 self.ui.log('simplecache', 'cache hit for %s\n', self.key)
64
65 if self.cacheobjects:
66 return {
67 'objs': entry,
68 }
69 else:
70 return {
71 'objs': [wireprototypes.encodedresponse(entry)],
72 }
73
74 def onobject(self, obj):
75 if self.cacheobjects:
76 self.buffered.append(obj)
77 else:
78 self.buffered.extend(self.encodefn(obj))
79
80 yield obj
81
82 def onfinished(self):
83 self.ui.log('simplecache', 'storing cache entry for %s\n', self.key)
84 if self.cacheobjects:
85 CACHE[self.key] = self.buffered
86 else:
87 CACHE[self.key] = b''.join(self.buffered)
88
89 return []
90
91 def makeresponsecacher(orig, repo, proto, command, args, objencoderfn):
92 return memorycacher(repo.ui, command, objencoderfn)
93
94 def extsetup(ui):
95 global CACHE
96
97 CACHE = util.lrucachedict(10000)
98
99 extensions.wrapfunction(wireprotov2server, 'makeresponsecacher',
100 makeresponsecacher)
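
For orientation, the server drives an object like memorycacher through a fixed sequence of interface calls. The following is a simplified, hypothetical sketch of that flow (the real driver lives in wireprotov2server.py; cachekey and genobjects are stand-ins):

    def runcachedcommand(cacher, cachekey, genobjects):
        # Hypothetical sketch of the call order on iwireprotocolcommandcacher.
        with cacher:
            # setcachekey() returning False means caching is disabled for
            # this request, so objects are emitted directly.
            if not cacher.setcachekey(cachekey):
                for obj in genobjects():
                    yield obj
                return

            hit = cacher.lookup()
            if hit is not None:
                # Cache hit: replay the stored objects (raw objects or
                # pre-encoded responses, depending on the cacher).
                for obj in hit['objs']:
                    yield obj
                return

            # Cache miss: route each emitted object through onobject() so the
            # cacher can buffer it, then let onfinished() record the entry.
            for obj in genobjects():
                for o in cacher.onobject(obj):
                    yield o
            for o in cacher.onfinished():
                yield o
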
@@ -1,1659 +1,1816
1 # repository.py - Interfaces and base classes for repositories and peers.
2 #
3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
4 #
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
7
8 from __future__ import absolute_import
9
10 from .i18n import _
11 from . import (
12 error,
13 )
14 from .utils import (
15 interfaceutil,
16 )
17
18 # When narrowing is finalized and no longer subject to format changes,
19 # we should move this to just "narrow" or similar.
20 NARROW_REQUIREMENT = 'narrowhg-experimental'
21
22 # Local repository feature string.
23
24 # Revlogs are being used for file storage.
25 REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
26 # The storage part of the repository is shared from an external source.
27 REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
28 # LFS supported for backing file storage.
29 REPO_FEATURE_LFS = b'lfs'
30
31 class ipeerconnection(interfaceutil.Interface):
32 """Represents a "connection" to a repository.
33
34 This is the base interface for representing a connection to a repository.
35 It holds basic properties and methods applicable to all peer types.
36
37 This is not a complete interface definition and should not be used
38 outside of this module.
39 """
40 ui = interfaceutil.Attribute("""ui.ui instance""")
41
42 def url():
43 """Returns a URL string representing this peer.
44
45 Currently, implementations expose the raw URL used to construct the
46 instance. It may contain credentials as part of the URL. The
47 expectations of the value aren't well-defined and this could lead to
48 data leakage.
49
50 TODO audit/clean consumers and more clearly define the contents of this
51 value.
52 """
53
54 def local():
55 """Returns a local repository instance.
56
57 If the peer represents a local repository, returns an object that
58 can be used to interface with it. Otherwise returns ``None``.
59 """
60
61 def peer():
62 """Returns an object conforming to this interface.
63
64 Most implementations will ``return self``.
65 """
66
67 def canpush():
68 """Returns a boolean indicating if this peer can be pushed to."""
69
70 def close():
71 """Close the connection to this peer.
72
73 This is called when the peer will no longer be used. Resources
74 associated with the peer should be cleaned up.
75 """
76
77 class ipeercapabilities(interfaceutil.Interface):
78 """Peer sub-interface related to capabilities."""
79
80 def capable(name):
81 """Determine support for a named capability.
82
83 Returns ``False`` if capability not supported.
84
85 Returns ``True`` if boolean capability is supported. Returns a string
86 if capability support is non-boolean.
87
88 Capability strings may or may not map to wire protocol capabilities.
89 """
90
91 def requirecap(name, purpose):
92 """Require a capability to be present.
93
94 Raises a ``CapabilityError`` if the capability isn't present.
95 """
96
97 class ipeercommands(interfaceutil.Interface):
98 """Client-side interface for communicating over the wire protocol.
99
100 This interface is used as a gateway to the Mercurial wire protocol.
101 Methods commonly call wire protocol commands of the same name.
102 """
103
104 def branchmap():
105 """Obtain heads in named branches.
106
107 Returns a dict mapping branch name to an iterable of nodes that are
108 heads on that branch.
109 """
110
111 def capabilities():
112 """Obtain capabilities of the peer.
113
114 Returns a set of string capabilities.
115 """
116
117 def clonebundles():
118 """Obtains the clone bundles manifest for the repo.
119
120 Returns the manifest as unparsed bytes.
121 """
122
123 def debugwireargs(one, two, three=None, four=None, five=None):
124 """Used to facilitate debugging of arguments passed over the wire."""
125
126 def getbundle(source, **kwargs):
127 """Obtain remote repository data as a bundle.
128
129 This command is how the bulk of repository data is transferred from
130 the peer to the local repository.
131
132 Returns a generator of bundle data.
133 """
134
135 def heads():
136 """Determine all known head revisions in the peer.
137
138 Returns an iterable of binary nodes.
139 """
140
141 def known(nodes):
142 """Determine whether multiple nodes are known.
143
144 Accepts an iterable of nodes whose presence to check for.
145
146 Returns an iterable of booleans indicating if the corresponding node
147 at that index is known to the peer.
148 """
149
150 def listkeys(namespace):
151 """Obtain all keys in a pushkey namespace.
152
153 Returns an iterable of key names.
154 """
155
156 def lookup(key):
157 """Resolve a value to a known revision.
158
159 Returns a binary node of the resolved revision on success.
160 """
161
162 def pushkey(namespace, key, old, new):
163 """Set a value using the ``pushkey`` protocol.
164
165 Arguments correspond to the pushkey namespace and key to operate on and
166 the old and new values for that key.
167
168 Returns a string with the peer result. The value inside varies by the
169 namespace.
170 """
171
172 def stream_out():
173 """Obtain streaming clone data.
174
175 Successful result should be a generator of data chunks.
176 """
177
178 def unbundle(bundle, heads, url):
179 """Transfer repository data to the peer.
180
181 This is how the bulk of data during a push is transferred.
182
183 Returns the integer number of heads added to the peer.
184 """
185
186 class ipeerlegacycommands(interfaceutil.Interface):
187 """Interface for implementing support for legacy wire protocol commands.
188
189 Wire protocol commands transition to legacy status when they are no longer
190 used by modern clients. To facilitate identifying which commands are
191 legacy, the interfaces are split.
192 """
193
194 def between(pairs):
195 """Obtain nodes between pairs of nodes.
196
197 ``pairs`` is an iterable of node pairs.
198
199 Returns an iterable of iterables of nodes corresponding to each
200 requested pair.
201 """
202
203 def branches(nodes):
204 """Obtain ancestor changesets of specific nodes back to a branch point.
205
206 For each requested node, the peer finds the first ancestor node that is
207 a DAG root or is a merge.
208
209 Returns an iterable of iterables with the resolved values for each node.
210 """
211
212 def changegroup(nodes, source):
213 """Obtain a changegroup with data for descendants of specified nodes."""
214
215 def changegroupsubset(bases, heads, source):
216 pass
217
218 class ipeercommandexecutor(interfaceutil.Interface):
219 """Represents a mechanism to execute remote commands.
220
221 This is the primary interface for requesting that wire protocol commands
222 be executed. Instances of this interface are active in a context manager
223 and have a well-defined lifetime. When the context manager exits, all
224 outstanding requests are waited on.
225 """
226
227 def callcommand(name, args):
228 """Request that a named command be executed.
229
230 Receives the command name and a dictionary of command arguments.
231
232 Returns a ``concurrent.futures.Future`` that will resolve to the
233 result of that command request. That exact value is left up to
234 the implementation and possibly varies by command.
235
236 Not all commands can coexist with other commands in an executor
237 instance: it depends on the underlying wire protocol transport being
238 used and the command itself.
239
240 Implementations MAY call ``sendcommands()`` automatically if the
241 requested command can not coexist with other commands in this executor.
242
243 Implementations MAY call ``sendcommands()`` automatically when the
244 future's ``result()`` is called. So, consumers using multiple
245 commands with an executor MUST ensure that ``result()`` is not called
246 until all command requests have been issued.
247 """
248
249 def sendcommands():
250 """Trigger submission of queued command requests.
251
252 Not all transports submit commands as soon as they are requested to
253 run. When called, this method forces queued command requests to be
254 issued. It will no-op if all commands have already been sent.
255
256 When called, no more new commands may be issued with this executor.
257 """
258
259 def close():
260 """Signal that this command request is finished.
261
262 When called, no more new commands may be issued. All outstanding
263 commands that have previously been issued are waited on before
264 returning. This not only includes waiting for the futures to resolve,
265 but also waiting for all response data to arrive. In other words,
266 calling this waits for all on-wire state for issued command requests
267 to finish.
268
269 When used as a context manager, this method is called when exiting the
270 context manager.
271
272 This method may call ``sendcommands()`` if there are buffered commands.
273 """
274
275 class ipeerrequests(interfaceutil.Interface):
276 """Interface for executing commands on a peer."""
277
278 def commandexecutor():
279 """A context manager that resolves to an ipeercommandexecutor.
280
281 The object this resolves to can be used to issue command requests
282 to the peer.
283
284 Callers should call its ``callcommand`` method to issue command
285 requests.
286
287 A new executor should be obtained for each distinct set of commands
288 (possibly just a single command) that the consumer wants to execute
289 as part of a single operation or round trip. This is because some
290 peers are half-duplex and/or don't support persistent connections.
291 e.g. in the case of HTTP peers, commands sent to an executor represent
292 a single HTTP request. While some peers may support multiple command
293 sends over the wire per executor, consumers need to code to the least
294 capable peer. So it should be assumed that command executors buffer
295 called commands until they are told to send them and that each
296 command executor could result in a new connection or wire-level request
297 being issued.
298 """
299
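
In practice, client code uses this interface roughly as in the following sketch (the peer object and the command issued are placeholders):

    # Hypothetical usage sketch of the command executor described above.
    with peer.commandexecutor() as e:
        f = e.callcommand(b'heads', {})
        # The future resolves to the command's decoded result once the
        # response has been received.
        heads = f.result()
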
300 class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
301 """Unified interface for peer repositories.
302
303 All peer instances must conform to this interface.
304 """
305
306 @interfaceutil.implementer(ipeerbase)
307 class peer(object):
308 """Base class for peer repositories."""
309
310 def capable(self, name):
311 caps = self.capabilities()
312 if name in caps:
313 return True
314
315 name = '%s=' % name
316 for cap in caps:
317 if cap.startswith(name):
318 return cap[len(name):]
319
320 return False
321
322 def requirecap(self, name, purpose):
323 if self.capable(name):
324 return
325
326 raise error.CapabilityError(
327 _('cannot %s; remote repository does not support the %r '
328 'capability') % (purpose, name))
329
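
The capable()/requirecap() implementation above distinguishes boolean capabilities from valued ones. A small hypothetical sketch of the calling convention (capability names are placeholders):

    if peer.capable(b'somefeature'):          # True for a boolean capability
        pass
    value = peer.capable(b'somefeature')      # b'v1,v2' if advertised as
                                              # b'somefeature=v1,v2', else False
    peer.requirecap(b'somefeature', _('use this feature'))  # raises if absent
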
330 class iverifyproblem(interfaceutil.Interface):
331 """Represents a problem with the integrity of the repository.
332
333 Instances of this interface are emitted to describe an integrity issue
334 with a repository (e.g. corrupt storage, missing data, etc).
335
336 Instances are essentially messages associated with severity.
337 """
338 warning = interfaceutil.Attribute(
339 """Message indicating a non-fatal problem.""")
340
341 error = interfaceutil.Attribute(
342 """Message indicating a fatal problem.""")
343
344 node = interfaceutil.Attribute(
345 """Revision encountering the problem.
346
347 ``None`` means the problem doesn't apply to a single revision.
348 """)
349
350 class irevisiondelta(interfaceutil.Interface):
351 """Represents a delta between one revision and another.
352
353 Instances convey enough information to allow a revision to be exchanged
354 with another repository.
355
356 Instances represent the fulltext revision data or a delta against
357 another revision. Therefore the ``revision`` and ``delta`` attributes
358 are mutually exclusive.
359
360 Typically used for changegroup generation.
361 """
362
363 node = interfaceutil.Attribute(
364 """20 byte node of this revision.""")
365
366 p1node = interfaceutil.Attribute(
367 """20 byte node of 1st parent of this revision.""")
368
369 p2node = interfaceutil.Attribute(
370 """20 byte node of 2nd parent of this revision.""")
371
372 linknode = interfaceutil.Attribute(
373 """20 byte node of the changelog revision this node is linked to.""")
374
375 flags = interfaceutil.Attribute(
376 """2 bytes of integer flags that apply to this revision.""")
377
378 basenode = interfaceutil.Attribute(
379 """20 byte node of the revision this data is a delta against.
380
381 ``nullid`` indicates that the revision is a full revision and not
382 a delta.
383 """)
384
385 baserevisionsize = interfaceutil.Attribute(
386 """Size of base revision this delta is against.
387
388 May be ``None`` if ``basenode`` is ``nullid``.
389 """)
390
391 revision = interfaceutil.Attribute(
392 """Raw fulltext of revision data for this node.""")
393
394 delta = interfaceutil.Attribute(
395 """Delta between ``basenode`` and ``node``.
396
397 Stored in the bdiff delta format.
398 """)
399
400 class ifilerevisionssequence(interfaceutil.Interface):
401 """Contains index data for all revisions of a file.
402
403 Types implementing this behave like lists of tuples. The index
404 in the list corresponds to the revision number. The values contain
405 index metadata.
406
407 The *null* revision (revision number -1) is always the last item
408 in the index.
409 """
410
411 def __len__():
412 """The total number of revisions."""
413
414 def __getitem__(rev):
415 """Returns the object having a specific revision number.
416
417 Returns an 8-tuple with the following fields:
418
419 offset+flags
420 Contains the offset and flags for the revision. 64-bit unsigned
421 integer where first 6 bytes are the offset and the next 2 bytes
422 are flags. The offset can be 0 if it is not used by the store.
423 compressed size
424 Size of the revision data in the store. It can be 0 if it isn't
425 needed by the store.
426 uncompressed size
427 Fulltext size. It can be 0 if it isn't needed by the store.
428 base revision
429 Revision number of revision the delta for storage is encoded
430 against. -1 indicates not encoded against a base revision.
431 link revision
432 Revision number of changelog revision this entry is related to.
433 p1 revision
434 Revision number of 1st parent. -1 if no 1st parent.
435 p2 revision
436 Revision number of 2nd parent. -1 if no 2nd parent.
437 node
438 Binary node value for this revision number.
439
440 Negative values should index off the end of the sequence. ``-1``
441 should return the null revision. ``-2`` should return the most
442 recent revision.
443 """
444
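
The 8-tuple layout documented above unpacks as in this hypothetical sketch (index is an object implementing this interface; rev is a revision number):

    # Hypothetical sketch: unpack one entry following the field order above.
    (offsetflags, compsize, uncompsize, baserev,
     linkrev, p1rev, p2rev, node) = index[rev]
    offset = offsetflags >> 16      # upper 6 bytes
    flags = offsetflags & 0xFFFF    # lower 2 bytes
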
445 def __contains__(rev):
446 """Whether a revision number exists."""
447
448 def insert(self, i, entry):
449 """Add an item to the index at specific revision."""
450
451 class ifileindex(interfaceutil.Interface):
452 """Storage interface for index data of a single file.
453
454 File storage data is divided into index metadata and data storage.
455 This interface defines the index portion of the interface.
456
457 The index logically consists of:
458
459 * A mapping between revision numbers and nodes.
460 * DAG data (storing and querying the relationship between nodes).
461 * Metadata to facilitate storage.
462 """
463 def __len__():
464 """Obtain the number of revisions stored for this file."""
465
466 def __iter__():
467 """Iterate over revision numbers for this file."""
468
469 def revs(start=0, stop=None):
470 """Iterate over revision numbers for this file, with control."""
471
472 def parents(node):
473 """Returns a 2-tuple of parent nodes for a revision.
474
475 Values will be ``nullid`` if the parent is empty.
476 """
477
478 def parentrevs(rev):
479 """Like parents() but operates on revision numbers."""
480
481 def rev(node):
482 """Obtain the revision number given a node.
483
484 Raises ``error.LookupError`` if the node is not known.
485 """
486
487 def node(rev):
488 """Obtain the node value given a revision number.
489
490 Raises ``IndexError`` if the node is not known.
491 """
492
493 def lookup(node):
494 """Attempt to resolve a value to a node.
495
496 Value can be a binary node, hex node, revision number, or a string
497 that can be converted to an integer.
498
499 Raises ``error.LookupError`` if a node could not be resolved.
500 """
501
502 def linkrev(rev):
503 """Obtain the changeset revision number a revision is linked to."""
504
505 def iscensored(rev):
506 """Return whether a revision's content has been censored."""
507
508 def commonancestorsheads(node1, node2):
509 """Obtain an iterable of nodes containing heads of common ancestors.
510
511 See ``ancestor.commonancestorsheads()``.
512 """
513
514 def descendants(revs):
515 """Obtain descendant revision numbers for a set of revision numbers.
516
517 If ``nullrev`` is in the set, this is equivalent to ``revs()``.
518 """
519
520 def heads(start=None, stop=None):
521 """Obtain a list of nodes that are DAG heads, with control.
522
523 The set of revisions examined can be limited by specifying
524 ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
525 iterable of nodes. DAG traversal starts at earlier revision
526 ``start`` and iterates forward until any node in ``stop`` is
527 encountered.
528 """
529
530 def children(node):
531 """Obtain nodes that are children of a node.
532
533 Returns a list of nodes.
534 """
535
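As a rough illustration of how the ``ifileindex`` methods compose, a sketch of walking a file's DAG, assuming ``fl`` is any object conforming to ``ifileindex`` (for example a filelog); nothing here is specific to one backend:

    from mercurial.node import hex, nullid

    for rev in fl.revs():
        node = fl.node(rev)
        p1, p2 = fl.parents(node)
        if p1 != nullid:
            assert fl.rev(p1) in fl       # parents live in the same index
        print(rev, hex(node), fl.linkrev(rev))

    for head in fl.heads():
        assert fl.children(head) == []    # a head has no children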
536 class ifiledata(interfaceutil.Interface):
537 """Storage interface for data storage of a specific file.
538
539 This complements ``ifileindex`` and provides an interface for accessing
540 data for a tracked file.
541 """
542 def size(rev):
543 """Obtain the fulltext size of file data.
544
545 Any metadata is excluded from size measurements.
546 """
547
548 def revision(node, raw=False):
549 """Obtain fulltext data for a node.
550
551 By default, any storage transformations are applied before the data
552 is returned. If ``raw`` is True, non-raw storage transformations
553 are not applied.
554
555 The fulltext data may contain a header containing metadata. Most
556 consumers should use ``read()`` to obtain the actual file data.
557 """
558
559 def read(node):
560 """Resolve file fulltext data.
561
562 This is similar to ``revision()`` except any metadata in the data
563 headers is stripped.
564 """
565
566 def renamed(node):
567 """Obtain copy metadata for a node.
568
569 Returns ``False`` if no copy metadata is stored or a 2-tuple of
570 (path, node) from which this revision was copied.
571 """
572
573 def cmp(node, fulltext):
574 """Compare fulltext to another revision.
575
576 Returns True if the fulltext is different from what is stored.
577
578 This takes copy metadata into account.
579
580 TODO better document the copy metadata and censoring logic.
581 """
582
583 def emitrevisions(nodes,
584 nodesorder=None,
585 revisiondata=False,
586 assumehaveparentrevisions=False,
587 deltaprevious=False):
588 """Produce ``irevisiondelta`` for revisions.
589
590 Given an iterable of nodes, emits objects conforming to the
591 ``irevisiondelta`` interface that describe revisions in storage.
592
593 This method is a generator.
594
595 The input nodes may be unordered. Implementations must ensure that a
596 node's parents are emitted before the node itself. Transitively, this
597 means that a node may only be emitted once all its ancestors in
598 ``nodes`` have also been emitted.
599
600 By default, emits "index" data (the ``node``, ``p1node``, and
601 ``p2node`` attributes). If ``revisiondata`` is set, revision data
602 will also be present on the emitted objects.
603
604 With default argument values, implementations can choose to emit
605 either fulltext revision data or a delta. When emitting deltas,
606 implementations must consider whether the delta's base revision
607 fulltext is available to the receiver.
608
609 The base revision fulltext is guaranteed to be available if any of
610 the following are met:
611
612 * Its fulltext revision was emitted by this method call.
613 * A delta for that revision was emitted by this method call.
614 * ``assumehaveparentrevisions`` is True and the base revision is a
615 parent of the node.
616
617 ``nodesorder`` can be used to control the order that revisions are
618 emitted. By default, revisions can be reordered as long as they are
619 in DAG topological order (see above). If the value is ``nodes``,
620 the iteration order from ``nodes`` should be used. If the value is
621 ``storage``, then the native order from the backing storage layer
622 is used. (Not all storage layers will have strong ordering and behavior
623 of this mode is storage-dependent.) ``nodes`` ordering can force
624 revisions to be emitted before their ancestors, so consumers should
625 use it with care.
626
627 The ``linknode`` attribute on the returned ``irevisiondelta`` may not
628 be set and it is the caller's responsibility to resolve it, if needed.
629
630 If ``deltaprevious`` is True and revision data is requested, all
631 revision data should be emitted as deltas against the revision
632 emitted just prior. The initial revision should be a delta against
633 its 1st parent.
634 """
635
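For illustration, a minimal sketch of consuming ``emitrevisions()``, assuming ``fl`` conforms to ``ifiledata`` and using the ``irevisiondelta`` attribute names (``revision``, ``basenode``, ``delta``); ``apply_delta`` is a hypothetical helper, not part of this interface:

    for rev in fl.emitrevisions(nodes,
                                revisiondata=True,
                                assumehaveparentrevisions=True):
        if rev.revision is not None:
            fulltext = rev.revision                  # emitted as a fulltext
        else:
            # emitted as a delta against rev.basenode, which is guaranteed
            # to be available under the rules described above
            fulltext = apply_delta(rev.basenode, rev.delta)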
636 class ifilemutation(interfaceutil.Interface):
637 """Storage interface for mutation events of a tracked file."""
638
639 def add(filedata, meta, transaction, linkrev, p1, p2):
640 """Add a new revision to the store.
641
642 Takes file data, dictionary of metadata, a transaction, linkrev,
643 and parent nodes.
644
645 Returns the node that was added.
646
647 May no-op if a revision matching the supplied data is already stored.
648 """
649
650 def addrevision(revisiondata, transaction, linkrev, p1, p2, node=None,
651 flags=0, cachedelta=None):
652 """Add a new revision to the store.
653
654 This is similar to ``add()`` except it operates at a lower level.
655
656 The data passed in already contains a metadata header, if any.
657
658 ``node`` and ``flags`` can be used to define the expected node and
659 the flags to use with storage.
660
661 ``add()`` is usually called when adding files from e.g. the working
662 directory. ``addrevision()`` is often called by ``add()`` and for
663 scenarios where revision data has already been computed, such as when
664 applying raw data from a peer repo.
665 """
666
667 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
668 """Process a series of deltas for storage.
669
670 ``deltas`` is an iterable of 7-tuples of
671 (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
672 to add.
673
674 The ``delta`` field contains ``mpatch`` data to apply to a base
675 revision, identified by ``deltabase``. The base node can be
676 ``nullid``, in which case the header from the delta can be ignored
677 and the delta used as the fulltext.
678
679 ``addrevisioncb`` should be called for each node as it is committed.
680
681 Returns a list of nodes that were processed. A node will be in the list
682 even if it existed in the store previously.
683 """
684
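A hedged sketch of feeding ``addgroup()``, building the 7-tuples from another store's ``emitrevisions()`` output (assumes ``src`` and ``dest`` conform to these interfaces, ``tr`` is a transaction, and that every emitted revision carries a delta; a real consumer would also handle fulltexts):

    def gendeltas(source, nodes):
        for rev in source.emitrevisions(nodes, revisiondata=True):
            yield (rev.node, rev.p1node, rev.p2node, rev.linknode,
                   rev.basenode, rev.delta, rev.flags)

    def linkmapper(linknode):
        return repo.changelog.rev(linknode)   # changeset node -> linkrev

    dest.addgroup(gendeltas(src, nodes), linkmapper, tr)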
685 def censorrevision(tr, node, tombstone=b''):
686 """Remove the content of a single revision.
687
688 The specified ``node`` will have its content purged from storage.
689 Future attempts to access the revision data for this node will
690 result in failure.
691
692 A ``tombstone`` message can optionally be stored. This message may be
693 displayed to users when they attempt to access the missing revision
694 data.
695
696 Storage backends may have stored deltas against the previous content
697 in this revision. As part of censoring a revision, these storage
698 backends are expected to rewrite any internally stored deltas such
699 that they no longer reference the deleted content.
700 """
701
702 def getstrippoint(minlink):
703 """Find the minimum revision that must be stripped to strip a linkrev.
704
705 Returns a 2-tuple containing the minimum revision number and a set
706 of all revision numbers that would be broken by this strip.
707
708 TODO this is highly revlog centric and should be abstracted into
709 a higher-level deletion API. ``repair.strip()`` relies on this.
710 """
711
712 def strip(minlink, transaction):
713 """Remove storage of items starting at a linkrev.
714
715 This uses ``getstrippoint()`` to determine the first node to remove.
716 Then it effectively truncates storage for all revisions after that.
717
718 TODO this is highly revlog centric and should be abstracted into a
719 higher-level deletion API.
720 """
721
722 class ifilestorage(ifileindex, ifiledata, ifilemutation):
723 """Complete storage interface for a single tracked file."""
724
725 def files():
726 """Obtain paths that are backing storage for this file.
727
728 TODO this is used heavily by verify code and there should probably
729 be a better API for that.
730 """
731
732 def storageinfo(exclusivefiles=False, sharedfiles=False,
733 revisionscount=False, trackedsize=False,
734 storedsize=False):
735 """Obtain information about storage for this file's data.
736
737 Returns a dict describing storage for this tracked path. The keys
738 in the dict map to arguments of the same name. The arguments are bools
739 indicating whether to calculate and obtain that data.
740
741 exclusivefiles
742 Iterable of (vfs, path) describing files that are exclusively
743 used to back storage for this tracked path.
744
745 sharedfiles
746 Iterable of (vfs, path) describing files that are used to back
747 storage for this tracked path. Those files may also provide storage
748 for other stored entities.
749
750 revisionscount
751 Number of revisions available for retrieval.
752
753 trackedsize
754 Total size in bytes of all tracked revisions. This is a sum of the
755 length of the fulltext of all revisions.
756
757 storedsize
758 Total size in bytes used to store data for all tracked revisions.
759 This is commonly less than ``trackedsize`` due to internal usage
760 of deltas rather than fulltext revisions.
761
762 Not all storage backends may support all queries or have a reasonable
763 value to use. In that case, the value should be set to ``None`` and
764 callers are expected to handle this special value.
765 """
766
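A small sketch of querying ``storageinfo()`` and tolerating backends that cannot answer (``fl`` conforms to ``ifilestorage``; the dict keys are shown as native strings for brevity):

    info = fl.storageinfo(revisionscount=True, trackedsize=True,
                          storedsize=True)
    revcount = info.get('revisionscount')
    if revcount is None:
        revcount = len(fl)                     # backend could not answer
    if info.get('trackedsize') and info.get('storedsize') is not None:
        ratio = info['storedsize'] / float(info['trackedsize'])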
767 def verifyintegrity(state):
768 """Verifies the integrity of file storage.
769
770 ``state`` is a dict holding state of the verifier process. It can be
771 used to communicate data between invocations of multiple storage
772 primitives.
773
774 If individual revisions cannot have their revision content resolved,
775 the method is expected to set the ``skipread`` key to a set of nodes
776 that encountered problems.
777
778 The method yields objects conforming to the ``iverifyproblem``
779 interface.
780 """
781
782 class idirs(interfaceutil.Interface):
783 """Interface representing a collection of directories from paths.
784
785 This interface is essentially a derived data structure representing
786 directories from a collection of paths.
787 """
788
789 def addpath(path):
790 """Add a path to the collection.
791
792 All directories in the path will be added to the collection.
793 """
794
795 def delpath(path):
796 """Remove a path from the collection.
797
798 If the removal was the last path in a particular directory, the
799 directory is removed from the collection.
800 """
801
802 def __iter__():
803 """Iterate over the directories in this collection of paths."""
804
805 def __contains__(path):
806 """Whether a specific directory is in this collection."""
807
808 class imanifestdict(interfaceutil.Interface):
809 """Interface representing a manifest data structure.
810
811 A manifest is effectively a dict mapping paths to entries. Each entry
812 consists of a binary node and extra flags affecting that entry.
813 """
814
815 def __getitem__(path):
816 """Returns the binary node value for a path in the manifest.
817
818 Raises ``KeyError`` if the path does not exist in the manifest.
819
820 Equivalent to ``self.find(path)[0]``.
821 """
822
823 def find(path):
824 """Returns the entry for a path in the manifest.
825
826 Returns a 2-tuple of (node, flags).
827
828 Raises ``KeyError`` if the path does not exist in the manifest.
829 """
830
831 def __len__():
832 """Return the number of entries in the manifest."""
833
834 def __nonzero__():
835 """Returns True if the manifest has entries, False otherwise."""
836
837 __bool__ = __nonzero__
838
839 def __setitem__(path, node):
840 """Define the node value for a path in the manifest.
841
842 If the path is already in the manifest, its flags will be copied to
843 the new entry.
844 """
845
846 def __contains__(path):
847 """Whether a path exists in the manifest."""
848
849 def __delitem__(path):
850 """Remove a path from the manifest.
851
852 Raises ``KeyError`` if the path is not in the manifest.
853 """
854
855 def __iter__():
856 """Iterate over paths in the manifest."""
857
858 def iterkeys():
859 """Iterate over paths in the manifest."""
860
861 def keys():
862 """Obtain a list of paths in the manifest."""
863
864 def filesnotin(other, match=None):
865 """Obtain the set of paths in this manifest but not in another.
866
867 ``match`` is an optional matcher function to be applied to both
868 manifests.
869
870 Returns a set of paths.
871 """
872
873 def dirs():
874 """Returns an object implementing the ``idirs`` interface."""
875
876 def hasdir(dir):
877 """Returns a bool indicating if a directory is in this manifest."""
878
879 def matches(match):
880 """Generate a new manifest filtered through a matcher.
881
882 Returns an object conforming to the ``imanifestdict`` interface.
883 """
884
885 def walk(match):
886 """Generator of paths in manifest satisfying a matcher.
887
888 This is equivalent to ``self.matches(match).iterkeys()`` except a new
889 manifest object is not created.
890
891 If the matcher has explicit files listed and they don't exist in
892 the manifest, ``match.bad()`` is called for each missing file.
893 """
894
895 def diff(other, match=None, clean=False):
896 """Find differences between this manifest and another.
897
898 This manifest is compared to ``other``.
899
900 If ``match`` is provided, the two manifests are filtered against this
901 matcher and only entries satisfying the matcher are compared.
902
903 If ``clean`` is True, unchanged files are included in the returned
904 object.
905
906 Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
907 the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
908 represents the node and flags for this manifest and ``(node2, flag2)``
909 are the same for the other manifest.
910 """
911
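For illustration, a sketch of interpreting ``diff()`` output, assuming (as in the stock manifestdict implementation) that a missing entry is represented with a ``None`` node:

    changes = m1.diff(m2)
    for path, ((node1, flag1), (node2, flag2)) in changes.items():
        if node1 is None:
            print('%s only in other' % path)
        elif node2 is None:
            print('%s only in this manifest' % path)
        elif node1 != node2 or flag1 != flag2:
            print('%s differs' % path)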
912 def setflag(path, flag):
913 """Set the flag value for a given path.
914
915 Raises ``KeyError`` if the path is not already in the manifest.
916 """
917
918 def get(path, default=None):
919 """Obtain the node value for a path or a default value if missing."""
920
921 def flags(path, default=''):
922 """Return the flags value for a path or a default value if missing."""
923
924 def copy():
925 """Return a copy of this manifest."""
926
927 def items():
928 """Returns an iterable of (path, node) for items in this manifest."""
929
930 def iteritems():
931 """Identical to items()."""
932
933 def iterentries():
934 """Returns an iterable of (path, node, flags) for this manifest.
935
936 Similar to ``iteritems()`` except items are a 3-tuple and include
937 flags.
938 """
939
940 def text():
941 """Obtain the raw data representation for this manifest.
942
943 Result is used to create a manifest revision.
944 """
945
946 def fastdelta(base, changes):
947 """Obtain a delta between this manifest and another given changes.
948
949 ``base`` is the raw data representation for another manifest.
950
951 ``changes`` is an iterable of ``(path, to_delete)``.
952
953 Returns a 2-tuple containing ``bytearray(self.text())`` and the
954 delta between ``base`` and this manifest.
955 """
956
957 class imanifestrevisionbase(interfaceutil.Interface):
958 """Base interface representing a single revision of a manifest.
959
960 Should not be used as a primary interface: should always be inherited
961 as part of a larger interface.
962 """
963
964 def new():
965 """Obtain a new manifest instance.
966
967 Returns an object conforming to the ``imanifestrevisionwritable``
968 interface. The instance will be associated with the same
969 ``imanifestlog`` collection as this instance.
970 """
971
972 def copy():
973 """Obtain a copy of this manifest instance.
974
975 Returns an object conforming to the ``imanifestrevisionwritable``
976 interface. The instance will be associated with the same
977 ``imanifestlog`` collection as this instance.
978 """
979
980 def read():
981 """Obtain the parsed manifest data structure.
982
983 The returned object conforms to the ``imanifestdict`` interface.
984 """
985
986 class imanifestrevisionstored(imanifestrevisionbase):
987 """Interface representing a manifest revision committed to storage."""
988
989 def node():
990 """The binary node for this manifest."""
991
992 parents = interfaceutil.Attribute(
993 """List of binary nodes that are parents for this manifest revision."""
994 )
995
996 def readdelta(shallow=False):
997 """Obtain the manifest data structure representing changes from parent.
998
999 This manifest is compared to its 1st parent. A new manifest representing
1000 those differences is constructed.
1001
1002 The returned object conforms to the ``imanifestdict`` interface.
1003 """
1004
1005 def readfast(shallow=False):
1006 """Calls either ``read()`` or ``readdelta()``.
1007
1008 The faster of the two options is called.
1009 """
1010
1011 def find(key):
1012 """Calls ``self.read().find(key)``.
1013
1014 Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
1015 """
1016
1017 class imanifestrevisionwritable(imanifestrevisionbase):
1018 """Interface representing a manifest revision that can be committed."""
1019
1020 def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
1021 """Add this revision to storage.
1022
1023 Takes a transaction object, the changeset revision number it will
1024 be associated with, its parent nodes, and lists of added and
1025 removed paths.
1026
1027 If match is provided, storage can choose not to inspect or write out
1028 items that do not match. Storage is still required to be able to provide
1029 the full manifest in the future for any directories written (these
1030 manifests should not be "narrowed on disk").
1031
1032 Returns the binary node of the created revision.
1033 """
1034
1035 class imanifeststorage(interfaceutil.Interface):
1036 """Storage interface for manifest data."""
1037
1038 tree = interfaceutil.Attribute(
1039 """The path to the directory this manifest tracks.
1040
1041 The empty bytestring represents the root manifest.
1042 """)
1043
1044 index = interfaceutil.Attribute(
1045 """An ``ifilerevisionssequence`` instance.""")
1046
1047 indexfile = interfaceutil.Attribute(
1048 """Path of revlog index file.
1049
1050 TODO this is revlog specific and should not be exposed.
1051 """)
1052
1053 opener = interfaceutil.Attribute(
1054 """VFS opener to use to access underlying files used for storage.
1055
1056 TODO this is revlog specific and should not be exposed.
1057 """)
1058
1059 version = interfaceutil.Attribute(
1060 """Revlog version number.
1061
1062 TODO this is revlog specific and should not be exposed.
1063 """)
1064
1065 _generaldelta = interfaceutil.Attribute(
1066 """Whether generaldelta storage is being used.
1067
1068 TODO this is revlog specific and should not be exposed.
1069 """)
1070
1071 fulltextcache = interfaceutil.Attribute(
1072 """Dict with cache of fulltexts.
1073
1074 TODO this doesn't feel appropriate for the storage interface.
1075 """)
1076
1077 def __len__():
1078 """Obtain the number of revisions stored for this manifest."""
1079
1080 def __iter__():
1081 """Iterate over revision numbers for this manifest."""
1082
1083 def rev(node):
1084 """Obtain the revision number given a binary node.
1085
1086 Raises ``error.LookupError`` if the node is not known.
1087 """
1088
1089 def node(rev):
1090 """Obtain the node value given a revision number.
1091
1092 Raises ``error.LookupError`` if the revision is not known.
1093 """
1094
1095 def lookup(value):
1096 """Attempt to resolve a value to a node.
1097
1098 Value can be a binary node, hex node, revision number, or a bytes
1099 that can be converted to an integer.
1100
1101 Raises ``error.LookupError`` if a node could not be resolved.
1102 """
1103
1104 def parents(node):
1105 """Returns a 2-tuple of parent nodes for a node.
1106
1107 Values will be ``nullid`` if the parent is empty.
1108 """
1109
1110 def parentrevs(rev):
1111 """Like parents() but operates on revision numbers."""
1112
1113 def linkrev(rev):
1114 """Obtain the changeset revision number a revision is linked to."""
1115
1116 def revision(node, _df=None, raw=False):
1117 """Obtain fulltext data for a node."""
1118
1119 def revdiff(rev1, rev2):
1120 """Obtain a delta between two revision numbers.
1121
1122 The returned data is the result of ``bdiff.bdiff()`` on the raw
1123 revision data.
1124 """
1125
1126 def cmp(node, fulltext):
1127 """Compare fulltext to another revision.
1128
1129 Returns True if the fulltext is different from what is stored.
1130 """
1131
1132 def emitrevisions(nodes,
1133 nodesorder=None,
1134 revisiondata=False,
1135 assumehaveparentrevisions=False):
1136 """Produce ``irevisiondelta`` describing revisions.
1137
1138 See the documentation for ``ifiledata`` for more.
1139 """
1140
1141 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
1142 """Process a series of deltas for storage.
1143
1144 See the documentation in ``ifilemutation`` for more.
1145 """
1146
1147 def rawsize(rev):
1148 """Obtain the size of tracked data.
1149
1150 Is equivalent to ``len(m.revision(node, raw=True))``.
1151
1152 TODO this method is only used by upgrade code and may be removed.
1153 """
1154
1155 def getstrippoint(minlink):
1156 """Find minimum revision that must be stripped to strip a linkrev.
1157
1158 See the documentation in ``ifilemutation`` for more.
1159 """
1160
1161 def strip(minlink, transaction):
1162 """Remove storage of items starting at a linkrev.
1163
1164 See the documentation in ``ifilemutation`` for more.
1165 """
1166
1167 def checksize():
1168 """Obtain the expected sizes of backing files.
1169
1170 TODO this is used by verify and it should not be part of the interface.
1171 """
1172
1173 def files():
1174 """Obtain paths that are backing storage for this manifest.
1175
1176 TODO this is used by verify and there should probably be a better API
1177 for this functionality.
1178 """
1179
1180 def deltaparent(rev):
1181 """Obtain the revision that a revision is delta'd against.
1182
1183 TODO delta encoding is an implementation detail of storage and should
1184 not be exposed to the storage interface.
1185 """
1186
1187 def clone(tr, dest, **kwargs):
1188 """Clone this instance to another."""
1189
1190 def clearcaches(clear_persisted_data=False):
1191 """Clear any caches associated with this instance."""
1192
1193 def dirlog(d):
1194 """Obtain a manifest storage instance for a tree."""
1195
1196 def add(m, transaction, link, p1, p2, added, removed, readtree=None,
1197 match=None):
1198 """Add a revision to storage.
1199
1200 ``m`` is an object conforming to ``imanifestdict``.
1201
1202 ``link`` is the linkrev revision number.
1203
1204 ``p1`` and ``p2`` are the parent revision numbers.
1205
1206 ``added`` and ``removed`` are iterables of added and removed paths,
1207 respectively.
1208
1209 ``readtree`` is a function that can be used to read the child tree(s)
1210 when recursively writing the full tree structure when using
1211 treemanifests.
1212
1213 ``match`` is a matcher that can be used to hint to storage that not all
1214 paths must be inspected; this is an optimization and can be safely
1215 ignored. Note that the storage must still be able to reproduce a full
1216 manifest including files that did not match.
1217 """
1218
1219 def storageinfo(exclusivefiles=False, sharedfiles=False,
1220 revisionscount=False, trackedsize=False,
1221 storedsize=False):
1222 """Obtain information about storage for this manifest's data.
1223
1224 See ``ifilestorage.storageinfo()`` for a description of this method.
1225 This one behaves the same way, except for manifest data.
1226 """
1227
1228 class imanifestlog(interfaceutil.Interface):
1229 """Interface representing a collection of manifest snapshots.
1230
1231 Represents the root manifest in a repository.
1232
1233 Also serves as a means to access nested tree manifests and to cache
1234 tree manifests.
1235 """
1236
1237 def __getitem__(node):
1238 """Obtain a manifest instance for a given binary node.
1239
1240 Equivalent to calling ``self.get('', node)``.
1241
1242 The returned object conforms to the ``imanifestrevisionstored``
1243 interface.
1244 """
1245
1246 def get(tree, node, verify=True):
1247 """Retrieve the manifest instance for a given directory and binary node.
1248
1249 ``node`` always refers to the node of the root manifest (which will be
1250 the only manifest if flat manifests are being used).
1251
1252 If ``tree`` is the empty string, the root manifest is returned.
1253 Otherwise the manifest for the specified directory will be returned
1254 (requires tree manifests).
1255
1256 If ``verify`` is True, ``LookupError`` is raised if the node is not
1257 known.
1258
1259 The returned object conforms to the ``imanifestrevisionstored``
1260 interface.
1261 """
1262
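A short sketch of resolving manifests through ``imanifestlog``, assuming the stock ``repo.manifestlog`` and ``changectx.manifestnode()`` APIs:

    mfl = repo.manifestlog
    node = repo['tip'].manifestnode()
    rootmf = mfl[node].read()          # root manifest; same as mfl.get('', node)
    # With treemanifests enabled, a sub-directory manifest can be read too:
    # submf = mfl.get('dir/', node).read()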
1263 def getstorage(tree):
1264 """Retrieve an interface to storage for a particular tree.
1265
1266 If ``tree`` is the empty bytestring, storage for the root manifest will
1267 be returned. Otherwise storage for a tree manifest is returned.
1268
1269 TODO formalize interface for returned object.
1270 """
1271
1272 def clearcaches():
1273 """Clear caches associated with this collection."""
1274
1275 def rev(node):
1276 """Obtain the revision number for a binary node.
1277
1278 Raises ``error.LookupError`` if the node is not known.
1279 """
1280
1281 class ilocalrepositoryfilestorage(interfaceutil.Interface):
1282 """Local repository sub-interface providing access to tracked file storage.
1283
1284 This interface defines how a repository accesses storage for a single
1285 tracked file path.
1286 """
1287
1288 def file(f):
1289 """Obtain a filelog for a tracked path.
1290
1291 The returned type conforms to the ``ifilestorage`` interface.
1292 """
1293
1294 class ilocalrepositorymain(interfaceutil.Interface):
1295 """Main interface for local repositories.
1296
1297 This currently captures the reality of things - not how things should be.
1298 """
1299
1300 supportedformats = interfaceutil.Attribute(
1301 """Set of requirements that apply to stream clone.
1302
1303 This is actually a class attribute and is shared among all instances.
1304 """)
1305
1306 supported = interfaceutil.Attribute(
1307 """Set of requirements that this repo is capable of opening.""")
1308
1309 requirements = interfaceutil.Attribute(
1310 """Set of requirements this repo uses.""")
1311
1312 features = interfaceutil.Attribute(
1313 """Set of "features" this repository supports.
1314
1315 A "feature" is a loosely-defined term. It can refer to a feature
1316 in the classical sense or can describe an implementation detail
1317 of the repository. For example, a ``readonly`` feature may denote
1318 the repository as read-only. Or a ``revlogfilestore`` feature may
1319 denote that the repository is using revlogs for file storage.
1320
1321 The intent of features is to provide a machine-queryable mechanism
1322 for repo consumers to test for various repository characteristics.
1323
1324 Features are similar to ``requirements``. The main difference is that
1325 requirements are stored on-disk and represent requirements to open the
1326 repository. Features are more run-time capabilities of the repository
1327 and more granular capabilities (which may be derived from requirements).
1328 """)
1329
1330 filtername = interfaceutil.Attribute(
1331 """Name of the repoview that is active on this repo.""")
1332
1333 wvfs = interfaceutil.Attribute(
1334 """VFS used to access the working directory.""")
1335
1336 vfs = interfaceutil.Attribute(
1337 """VFS rooted at the .hg directory.
1338
1339 Used to access repository data not in the store.
1340 """)
1341
1342 svfs = interfaceutil.Attribute(
1343 """VFS rooted at the store.
1344
1345 Used to access repository data in the store. Typically .hg/store.
1346 But can point elsewhere if the store is shared.
1347 """)
1348
1349 root = interfaceutil.Attribute(
1350 """Path to the root of the working directory.""")
1351
1352 path = interfaceutil.Attribute(
1353 """Path to the .hg directory.""")
1354
1355 origroot = interfaceutil.Attribute(
1356 """The filesystem path that was used to construct the repo.""")
1357
1358 auditor = interfaceutil.Attribute(
1359 """A pathauditor for the working directory.
1360
1361 This checks if a path refers to a nested repository.
1362
1363 Operates on the filesystem.
1364 """)
1365
1366 nofsauditor = interfaceutil.Attribute(
1367 """A pathauditor for the working directory.
1368
1369 This is like ``auditor`` except it doesn't do filesystem checks.
1370 """)
1371
1372 baseui = interfaceutil.Attribute(
1373 """Original ui instance passed into constructor.""")
1374
1375 ui = interfaceutil.Attribute(
1376 """Main ui instance for this instance.""")
1377
1378 sharedpath = interfaceutil.Attribute(
1379 """Path to the .hg directory of the repo this repo was shared from.""")
1380
1381 store = interfaceutil.Attribute(
1382 """A store instance.""")
1383
1384 spath = interfaceutil.Attribute(
1385 """Path to the store.""")
1386
1387 sjoin = interfaceutil.Attribute(
1388 """Alias to self.store.join.""")
1389
1390 cachevfs = interfaceutil.Attribute(
1391 """A VFS used to access the cache directory.
1392
1393 Typically .hg/cache.
1394 """)
1395
1396 filteredrevcache = interfaceutil.Attribute(
1397 """Holds sets of revisions to be filtered.""")
1398
1399 names = interfaceutil.Attribute(
1400 """A ``namespaces`` instance.""")
1400 """A ``namespaces`` instance.""")
1401
1401
1402 def close():
1402 def close():
1403 """Close the handle on this repository."""
1403 """Close the handle on this repository."""
1404
1404
1405 def peer():
1405 def peer():
1406 """Obtain an object conforming to the ``peer`` interface."""
1406 """Obtain an object conforming to the ``peer`` interface."""
1407
1407
1408 def unfiltered():
1408 def unfiltered():
1409 """Obtain an unfiltered/raw view of this repo."""
1409 """Obtain an unfiltered/raw view of this repo."""
1410
1410
1411 def filtered(name, visibilityexceptions=None):
1411 def filtered(name, visibilityexceptions=None):
1412 """Obtain a named view of this repository."""
1412 """Obtain a named view of this repository."""
1413
1413
1414 obsstore = interfaceutil.Attribute(
1414 obsstore = interfaceutil.Attribute(
1415 """A store of obsolescence data.""")
1415 """A store of obsolescence data.""")
1416
1416
1417 changelog = interfaceutil.Attribute(
1417 changelog = interfaceutil.Attribute(
1418 """A handle on the changelog revlog.""")
1418 """A handle on the changelog revlog.""")
1419
1419
1420 manifestlog = interfaceutil.Attribute(
1420 manifestlog = interfaceutil.Attribute(
1421 """An instance conforming to the ``imanifestlog`` interface.
1421 """An instance conforming to the ``imanifestlog`` interface.
1422
1422
1423 Provides access to manifests for the repository.
1423 Provides access to manifests for the repository.
1424 """)
1424 """)
1425
1425
1426 dirstate = interfaceutil.Attribute(
1426 dirstate = interfaceutil.Attribute(
1427 """Working directory state.""")
1427 """Working directory state.""")
1428
1428
1429 narrowpats = interfaceutil.Attribute(
1429 narrowpats = interfaceutil.Attribute(
1430 """Matcher patterns for this repository's narrowspec.""")
1430 """Matcher patterns for this repository's narrowspec.""")
1431
1431
1432 def narrowmatch():
1432 def narrowmatch():
1433 """Obtain a matcher for the narrowspec."""
1433 """Obtain a matcher for the narrowspec."""
1434
1434
1435 def setnarrowpats(newincludes, newexcludes):
1435 def setnarrowpats(newincludes, newexcludes):
1436 """Define the narrowspec for this repository."""
1436 """Define the narrowspec for this repository."""
1437
1437
1438 def __getitem__(changeid):
1438 def __getitem__(changeid):
1439 """Try to resolve a changectx."""
1439 """Try to resolve a changectx."""
1440
1440
1441 def __contains__(changeid):
1441 def __contains__(changeid):
1442 """Whether a changeset exists."""
1442 """Whether a changeset exists."""
1443
1443
1444 def __nonzero__():
1444 def __nonzero__():
1445 """Always returns True."""
1445 """Always returns True."""
1446 return True
1446 return True
1447
1447
1448 __bool__ = __nonzero__
1448 __bool__ = __nonzero__
1449
1449
1450 def __len__():
1450 def __len__():
1451 """Returns the number of changesets in the repo."""
1451 """Returns the number of changesets in the repo."""
1452
1452
1453 def __iter__():
1453 def __iter__():
1454 """Iterate over revisions in the changelog."""
1454 """Iterate over revisions in the changelog."""
1455
1455
1456 def revs(expr, *args):
1456 def revs(expr, *args):
1457 """Evaluate a revset.
1457 """Evaluate a revset.
1458
1458
1459 Emits revisions.
1459 Emits revisions.
1460 """
1460 """
1461
1461
1462 def set(expr, *args):
1462 def set(expr, *args):
1463 """Evaluate a revset.
1463 """Evaluate a revset.
1464
1464
1465 Emits changectx instances.
1465 Emits changectx instances.
1466 """
1466 """
1467
1467
1468 def anyrevs(specs, user=False, localalias=None):
1468 def anyrevs(specs, user=False, localalias=None):
1469 """Find revisions matching one of the given revsets."""
1469 """Find revisions matching one of the given revsets."""
1470
1470
1471 def url():
1471 def url():
1472 """Returns a string representing the location of this repo."""
1472 """Returns a string representing the location of this repo."""
1473
1473
1474 def hook(name, throw=False, **args):
1474 def hook(name, throw=False, **args):
1475 """Call a hook."""
1475 """Call a hook."""
1476
1476
1477 def tags():
1477 def tags():
1478 """Return a mapping of tag to node."""
1478 """Return a mapping of tag to node."""
1479
1479
1480 def tagtype(tagname):
1480 def tagtype(tagname):
1481 """Return the type of a given tag."""
1481 """Return the type of a given tag."""
1482
1482
1483 def tagslist():
1483 def tagslist():
1484 """Return a list of tags ordered by revision."""
1484 """Return a list of tags ordered by revision."""
1485
1485
1486 def nodetags(node):
1486 def nodetags(node):
1487 """Return the tags associated with a node."""
1487 """Return the tags associated with a node."""
1488
1488
1489 def nodebookmarks(node):
1489 def nodebookmarks(node):
1490 """Return the list of bookmarks pointing to the specified node."""
1490 """Return the list of bookmarks pointing to the specified node."""
1491
1491
1492 def branchmap():
1492 def branchmap():
1493 """Return a mapping of branch to heads in that branch."""
1493 """Return a mapping of branch to heads in that branch."""
1494
1494
1495 def revbranchcache():
1495 def revbranchcache():
1496 pass
1496 pass
1497
1497
1498 def branchtip(branchtip, ignoremissing=False):
1498 def branchtip(branchtip, ignoremissing=False):
1499 """Return the tip node for a given branch."""
1499 """Return the tip node for a given branch."""
1500
1500
1501 def lookup(key):
1501 def lookup(key):
1502 """Resolve the node for a revision."""
1502 """Resolve the node for a revision."""
1503
1503
1504 def lookupbranch(key):
1504 def lookupbranch(key):
1505 """Look up the branch name of the given revision or branch name."""
1505 """Look up the branch name of the given revision or branch name."""
1506
1506
1507 def known(nodes):
1507 def known(nodes):
1508 """Determine whether a series of nodes is known.
1508 """Determine whether a series of nodes is known.
1509
1509
1510 Returns a list of bools.
1510 Returns a list of bools.
1511 """
1511 """
1512
1512
1513 def local():
1513 def local():
1514 """Whether the repository is local."""
1514 """Whether the repository is local."""
1515 return True
1515 return True
1516
1516
1517 def publishing():
1517 def publishing():
1518 """Whether the repository is a publishing repository."""
1518 """Whether the repository is a publishing repository."""
1519
1519
1520 def cancopy():
1520 def cancopy():
1521 pass
1521 pass
1522
1522
1523 def shared():
1523 def shared():
1524 """The type of shared repository or None."""
1524 """The type of shared repository or None."""
1525
1525
1526 def wjoin(f, *insidef):
1526 def wjoin(f, *insidef):
1527 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
1527 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
1528
1528
1529 def setparents(p1, p2):
1529 def setparents(p1, p2):
1530 """Set the parent nodes of the working directory."""
1530 """Set the parent nodes of the working directory."""
1531
1531
1532 def filectx(path, changeid=None, fileid=None):
1532 def filectx(path, changeid=None, fileid=None):
1533 """Obtain a filectx for the given file revision."""
1533 """Obtain a filectx for the given file revision."""
1534
1534
1535 def getcwd():
1535 def getcwd():
1536 """Obtain the current working directory from the dirstate."""
1536 """Obtain the current working directory from the dirstate."""
1537
1537
1538 def pathto(f, cwd=None):
1538 def pathto(f, cwd=None):
1539 """Obtain the relative path to a file."""
1539 """Obtain the relative path to a file."""
1540
1540
1541 def adddatafilter(name, fltr):
1541 def adddatafilter(name, fltr):
1542 pass
1542 pass
1543
1543
1544 def wread(filename):
1544 def wread(filename):
1545 """Read a file from wvfs, using data filters."""
1545 """Read a file from wvfs, using data filters."""
1546
1546
1547 def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
1547 def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
1548 """Write data to a file in the wvfs, using data filters."""
1548 """Write data to a file in the wvfs, using data filters."""
1549
1549
1550 def wwritedata(filename, data):
1550 def wwritedata(filename, data):
1551 """Resolve data for writing to the wvfs, using data filters."""
1551 """Resolve data for writing to the wvfs, using data filters."""
1552
1552
1553 def currenttransaction():
1553 def currenttransaction():
1554 """Obtain the current transaction instance or None."""
1554 """Obtain the current transaction instance or None."""
1555
1555
1556 def transaction(desc, report=None):
1556 def transaction(desc, report=None):
1557 """Open a new transaction to write to the repository."""
1557 """Open a new transaction to write to the repository."""
1558
1558
1559 def undofiles():
1559 def undofiles():
1560 """Returns a list of (vfs, path) for files to undo transactions."""
1560 """Returns a list of (vfs, path) for files to undo transactions."""
1561
1561
1562 def recover():
1562 def recover():
1563 """Roll back an interrupted transaction."""
1563 """Roll back an interrupted transaction."""
1564
1564
1565 def rollback(dryrun=False, force=False):
1565 def rollback(dryrun=False, force=False):
1566 """Undo the last transaction.
1566 """Undo the last transaction.
1567
1567
1568 DANGEROUS.
1568 DANGEROUS.
1569 """
1569 """
1570
1570
1571 def updatecaches(tr=None, full=False):
1571 def updatecaches(tr=None, full=False):
1572 """Warm repo caches."""
1572 """Warm repo caches."""
1573
1573
1574 def invalidatecaches():
1574 def invalidatecaches():
1575 """Invalidate cached data due to the repository mutating."""
1575 """Invalidate cached data due to the repository mutating."""
1576
1576
1577 def invalidatevolatilesets():
1577 def invalidatevolatilesets():
1578 pass
1578 pass
1579
1579
1580 def invalidatedirstate():
1580 def invalidatedirstate():
1581 """Invalidate the dirstate."""
1581 """Invalidate the dirstate."""
1582
1582
1583 def invalidate(clearfilecache=False):
1583 def invalidate(clearfilecache=False):
1584 pass
1584 pass
1585
1585
1586 def invalidateall():
1586 def invalidateall():
1587 pass
1587 pass
1588
1588
1589 def lock(wait=True):
1589 def lock(wait=True):
1590 """Lock the repository store and return a lock instance."""
1590 """Lock the repository store and return a lock instance."""
1591
1591
1592 def wlock(wait=True):
1592 def wlock(wait=True):
1593 """Lock the non-store parts of the repository."""
1593 """Lock the non-store parts of the repository."""
1594
1594
1595 def currentwlock():
1595 def currentwlock():
1596 """Return the wlock if it's held or None."""
1596 """Return the wlock if it's held or None."""
1597
1597
1598 def checkcommitpatterns(wctx, vdirs, match, status, fail):
1598 def checkcommitpatterns(wctx, vdirs, match, status, fail):
1599 pass
1599 pass
1600
1600
1601 def commit(text='', user=None, date=None, match=None, force=False,
1601 def commit(text='', user=None, date=None, match=None, force=False,
1602 editor=False, extra=None):
1602 editor=False, extra=None):
1603 """Add a new revision to the repository."""
1603 """Add a new revision to the repository."""
1604
1604
1605 def commitctx(ctx, error=False):
1605 def commitctx(ctx, error=False):
1606 """Commit a commitctx instance to the repository."""
1606 """Commit a commitctx instance to the repository."""
1607
1607
1608 def destroying():
1608 def destroying():
1609 """Inform the repository that nodes are about to be destroyed."""
1609 """Inform the repository that nodes are about to be destroyed."""
1610
1610
1611 def destroyed():
1611 def destroyed():
1612 """Inform the repository that nodes have been destroyed."""
1612 """Inform the repository that nodes have been destroyed."""
1613
1613
1614 def status(node1='.', node2=None, match=None, ignored=False,
1614 def status(node1='.', node2=None, match=None, ignored=False,
1615 clean=False, unknown=False, listsubrepos=False):
1615 clean=False, unknown=False, listsubrepos=False):
1616 """Convenience method to call repo[x].status()."""
1616 """Convenience method to call repo[x].status()."""
1617
1617
1618 def addpostdsstatus(ps):
1618 def addpostdsstatus(ps):
1619 pass
1619 pass
1620
1620
1621 def postdsstatus():
1621 def postdsstatus():
1622 pass
1622 pass
1623
1623
1624 def clearpostdsstatus():
1624 def clearpostdsstatus():
1625 pass
1625 pass
1626
1626
1627 def heads(start=None):
1627 def heads(start=None):
1628 """Obtain list of nodes that are DAG heads."""
1628 """Obtain list of nodes that are DAG heads."""
1629
1629
1630 def branchheads(branch=None, start=None, closed=False):
1630 def branchheads(branch=None, start=None, closed=False):
1631 pass
1631 pass
1632
1632
1633 def branches(nodes):
1633 def branches(nodes):
1634 pass
1634 pass
1635
1635
1636 def between(pairs):
1636 def between(pairs):
1637 pass
1637 pass
1638
1638
1639 def checkpush(pushop):
1639 def checkpush(pushop):
1640 pass
1640 pass
1641
1641
1642 prepushoutgoinghooks = interfaceutil.Attribute(
1642 prepushoutgoinghooks = interfaceutil.Attribute(
1643 """util.hooks instance.""")
1643 """util.hooks instance.""")
1644
1644
1645 def pushkey(namespace, key, old, new):
1645 def pushkey(namespace, key, old, new):
1646 pass
1646 pass
1647
1647
1648 def listkeys(namespace):
1648 def listkeys(namespace):
1649 pass
1649 pass
1650
1650
1651 def debugwireargs(one, two, three=None, four=None, five=None):
1651 def debugwireargs(one, two, three=None, four=None, five=None):
1652 pass
1652 pass
1653
1653
1654 def savecommitmessage(text):
1654 def savecommitmessage(text):
1655 pass
1655 pass
1656
1656
1657 class completelocalrepository(ilocalrepositorymain,
1657 class completelocalrepository(ilocalrepositorymain,
1658 ilocalrepositoryfilestorage):
1658 ilocalrepositoryfilestorage):
1659 """Complete interface for a local repository."""
1659 """Complete interface for a local repository."""
1660
1661 class iwireprotocolcommandcacher(interfaceutil.Interface):
1662 """Represents a caching backend for wire protocol commands.
1663
1664 Wire protocol version 2 supports transparent caching of many commands.
1665 To leverage this caching, servers can activate objects that cache
1666 command responses. Objects handle both cache writing and reading.
1667 This interface defines how that response caching mechanism works.
1668
1669 Wire protocol version 2 commands emit a series of objects that are
1670 serialized and sent to the client. The caching layer exists between
1671 the invocation of the command function and the sending of its output
1672 objects to an output layer.
1673
1674 Instances of this interface represent a binding to a cache that
1675 can serve a response (in place of calling a command function) and/or
1676 write responses to a cache for subsequent use.
1677
1678 When a command request arrives, the following happens with regards
1679 to this interface:
1680
1681 1. The server determines whether the command request is cacheable.
1682 2. If it is, an instance of this interface is spawned.
1683 3. The cacher is activated in a context manager (``__enter__`` is called).
1684 4. A cache *key* for that request is derived. This will call the
1685 instance's ``adjustcachekeystate()`` method so the derivation
1686 can be influenced.
1687 5. The cacher is informed of the derived cache key via a call to
1688 ``setcachekey()``.
1689 6. The cacher's ``lookup()`` method is called to test for presence of
1690 the derived key in the cache.
1691 7. If ``lookup()`` returns a hit, that cached result is used in place
1692 of invoking the command function. ``__exit__`` is called and the instance
1693 is discarded.
1694 8. On a cache miss, the command function is invoked.
1695 9. ``onobject()`` is called for each object emitted by the command
1696 function.
1697 10. After the final object is seen, ``onfinished()`` is called.
1698 11. ``__exit__`` is called to signal the end of use of the instance.
1699
1700 Cache *key* derivation can be influenced by the instance.
1701
1702 Cache keys are initially derived by a deterministic representation of
1703 the command request. This includes the command name, arguments, protocol
1704 version, etc. This initial key derivation is performed by CBOR-encoding a
1705 data structure and feeding that output into a hasher.
1706
1707 Instances of this interface can influence this initial key derivation
1708 via ``adjustcachekeystate()``.
1709
1710 The instance is informed of the derived cache key via a call to
1711 ``setcachekey()``. The instance must store the key locally so it can
1712 be consulted on subsequent operations that may require it.
1713
1714 When constructed, the instance has access to a callable that can be used
1715 for encoding response objects. This callable receives as its single
1716 argument an object emitted by a command function. It returns an iterable
1717 of bytes chunks representing the encoded object. Unless the cacher is
1718 caching native Python objects in memory or has a way of reconstructing
1719 the original Python objects, implementations typically call this function
1720 to produce bytes from the output objects and then store those bytes in
1721 the cache. When it comes time to re-emit those bytes, they are wrapped
1722 in a ``wireprototypes.encodedresponse`` instance to tell the output
1723 layer that they are pre-encoded.
1724
1725 When receiving the objects emitted by the command function, instances
1726 can choose what to do with those objects. The simplest thing to do is
1727 re-emit the original objects. They will be forwarded to the output
1728 layer and will be processed as if the cacher did not exist.
1729
1730 Implementations could also choose to not emit objects - instead locally
1731 buffering objects or their encoded representation. They could then emit
1732 a single "coalesced" object when ``onoutputfinished()`` is called. In
1733 this way, the implementation would function as a filtering layer of
1734 sorts.
1735
1736 When caching objects, typically the encoded form of the object will
1737 be stored. Keep in mind that if the original object is forwarded to
1738 the output layer, it will need to be encoded there as well. For large
1739 output, this redundant encoding could add overhead. Implementations
1740 could wrap the encoded object data in ``wireprototypes.encodedresponse``
1741 instances to avoid this overhead.
1742 """
1743 def __enter__():
1744 """Marks the instance as active.
1745
1746 Should return self.
1747 """
1748
1749 def __exit__(exctype, excvalue, exctb):
1750 """Called when cacher is no longer used.
1751
1752 This can be used by implementations to perform cleanup actions (e.g.
1753 disconnecting network sockets, aborting a partially cached response).
1754 """
1755
1756 def adjustcachekeystate(state):
1757 """Influences cache key derivation by adjusting state to derive key.
1758
1759 A dict defining the state used to derive the cache key is passed.
1760
1761 Implementations can modify this dict to record additional state that
1762 should influence key derivation.
1763
1764 Implementations are *highly* encouraged to not modify or delete
1765 existing keys.
1766 """
1767
1768 def setcachekey(key):
1769 """Record the derived cache key for this request.
1770
1771 Instances may mutate the key for internal usage, as desired. e.g.
1772 instances may wish to prepend the repo name, introduce path
1773 components for filesystem or URL addressing, etc. Behavior is up to
1774 the cache.
1775
1776 Returns a bool indicating if the request is cacheable by this
1777 instance.
1778 """
1779
1780 def lookup():
1781 """Attempt to resolve an entry in the cache.
1782
1783 The instance is instructed to look for the cache key that it was
1784 informed about via the call to ``setcachekey()``.
1785
1786 If there's no cache hit or the cacher doesn't wish to use the cached
1787 entry, ``None`` should be returned.
1788
1789 Else, a dict defining the cached result should be returned. The
1790 dict may have the following keys:
1791
1792 objs
1793 An iterable of objects that should be sent to the client. That
1794 iterable of objects is expected to be what the command function
1795 would return if invoked or an equivalent representation thereof.
1796 """
1797
1798 def onobject(obj):
1799 """Called when a new object is emitted from the command function.
1800
1801 Receives as its argument the object that was emitted from the
1802 command function.
1803
1804 This method returns an iterator of objects to forward to the output
1805 layer. The easiest implementation is a generator that just
1806 ``yield obj``.
1807 """
1808
1809 def onfinished():
1810 """Called after all objects have been emitted from the command function.
1811
1812 Implementations should return an iterator of objects to forward to
1813 the output layer.
1814
1815 This method can be a generator.
1816 """
@@ -1,369 +1,370
1 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
1 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
2 #
2 #
3 # This software may be used and distributed according to the terms of the
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
4 # GNU General Public License version 2 or any later version.
5
5
6 from __future__ import absolute_import
6 from __future__ import absolute_import
7
7
8 from .node import (
8 from .node import (
9 bin,
9 bin,
10 hex,
10 hex,
11 )
11 )
12 from .i18n import _
12 from .i18n import _
13 from .thirdparty import (
13 from .thirdparty import (
14 attr,
14 attr,
15 )
15 )
16 from . import (
16 from . import (
17 error,
17 error,
18 util,
18 util,
19 )
19 )
20 from .utils import (
20 from .utils import (
21 interfaceutil,
21 interfaceutil,
22 )
22 )
23
23
24 # Names of the SSH protocol implementations.
24 # Names of the SSH protocol implementations.
25 SSHV1 = 'ssh-v1'
25 SSHV1 = 'ssh-v1'
26 # These are advertised over the wire. Increment the counters at the end
26 # These are advertised over the wire. Increment the counters at the end
27 # to reflect BC breakages.
27 # to reflect BC breakages.
28 SSHV2 = 'exp-ssh-v2-0002'
28 SSHV2 = 'exp-ssh-v2-0002'
29 HTTP_WIREPROTO_V2 = 'exp-http-v2-0002'
29 HTTP_WIREPROTO_V2 = 'exp-http-v2-0002'
30
30
31 # All available wire protocol transports.
31 # All available wire protocol transports.
32 TRANSPORTS = {
32 TRANSPORTS = {
33 SSHV1: {
33 SSHV1: {
34 'transport': 'ssh',
34 'transport': 'ssh',
35 'version': 1,
35 'version': 1,
36 },
36 },
37 SSHV2: {
37 SSHV2: {
38 'transport': 'ssh',
38 'transport': 'ssh',
39 # TODO mark as version 2 once all commands are implemented.
39 # TODO mark as version 2 once all commands are implemented.
40 'version': 1,
40 'version': 1,
41 },
41 },
42 'http-v1': {
42 'http-v1': {
43 'transport': 'http',
43 'transport': 'http',
44 'version': 1,
44 'version': 1,
45 },
45 },
46 HTTP_WIREPROTO_V2: {
46 HTTP_WIREPROTO_V2: {
47 'transport': 'http',
47 'transport': 'http',
48 'version': 2,
48 'version': 2,
49 }
49 }
50 }
50 }
51
51
52 class bytesresponse(object):
52 class bytesresponse(object):
53 """A wire protocol response consisting of raw bytes."""
53 """A wire protocol response consisting of raw bytes."""
54 def __init__(self, data):
54 def __init__(self, data):
55 self.data = data
55 self.data = data
56
56
57 class ooberror(object):
57 class ooberror(object):
58 """wireproto reply: failure of a batch of operation
58 """wireproto reply: failure of a batch of operation
59
59
60 Something failed during a batch call. The error message is stored in
60 Something failed during a batch call. The error message is stored in
61 `self.message`.
61 `self.message`.
62 """
62 """
63 def __init__(self, message):
63 def __init__(self, message):
64 self.message = message
64 self.message = message
65
65
66 class pushres(object):
66 class pushres(object):
67 """wireproto reply: success with simple integer return
67 """wireproto reply: success with simple integer return
68
68
69 The call was successful and returned an integer contained in `self.res`.
69 The call was successful and returned an integer contained in `self.res`.
70 """
70 """
71 def __init__(self, res, output):
71 def __init__(self, res, output):
72 self.res = res
72 self.res = res
73 self.output = output
73 self.output = output
74
74
75 class pusherr(object):
75 class pusherr(object):
76 """wireproto reply: failure
76 """wireproto reply: failure
77
77
78 The call failed. The `self.res` attribute contains the error message.
78 The call failed. The `self.res` attribute contains the error message.
79 """
79 """
80 def __init__(self, res, output):
80 def __init__(self, res, output):
81 self.res = res
81 self.res = res
82 self.output = output
82 self.output = output
83
83
84 class streamres(object):
84 class streamres(object):
85 """wireproto reply: binary stream
85 """wireproto reply: binary stream
86
86
87 The call was successful and the result is a stream.
87 The call was successful and the result is a stream.
88
88
89 Accepts a generator containing chunks of data to be sent to the client.
89 Accepts a generator containing chunks of data to be sent to the client.
90
90
91 ``prefer_uncompressed`` indicates that the data is expected to be
91 ``prefer_uncompressed`` indicates that the data is expected to be
92 incompressible and that the stream should therefore use the ``none``
92 incompressible and that the stream should therefore use the ``none``
93 engine.
93 engine.
94 """
94 """
95 def __init__(self, gen=None, prefer_uncompressed=False):
95 def __init__(self, gen=None, prefer_uncompressed=False):
96 self.gen = gen
96 self.gen = gen
97 self.prefer_uncompressed = prefer_uncompressed
97 self.prefer_uncompressed = prefer_uncompressed
98
98
99 class streamreslegacy(object):
99 class streamreslegacy(object):
100 """wireproto reply: uncompressed binary stream
100 """wireproto reply: uncompressed binary stream
101
101
102 The call was successful and the result is a stream.
102 The call was successful and the result is a stream.
103
103
104 Accepts a generator containing chunks of data to be sent to the client.
104 Accepts a generator containing chunks of data to be sent to the client.
105
105
106 Like ``streamres``, but sends uncompressed data for "version 1" clients
106 Like ``streamres``, but sends uncompressed data for "version 1" clients
107 using the application/mercurial-0.1 media type.
107 using the application/mercurial-0.1 media type.
108 """
108 """
109 def __init__(self, gen=None):
109 def __init__(self, gen=None):
110 self.gen = gen
110 self.gen = gen
111
111
112 # list of nodes encoding / decoding
112 # list of nodes encoding / decoding
113 def decodelist(l, sep=' '):
113 def decodelist(l, sep=' '):
114 if l:
114 if l:
115 return [bin(v) for v in l.split(sep)]
115 return [bin(v) for v in l.split(sep)]
116 return []
116 return []
117
117
118 def encodelist(l, sep=' '):
118 def encodelist(l, sep=' '):
119 try:
119 try:
120 return sep.join(map(hex, l))
120 return sep.join(map(hex, l))
121 except TypeError:
121 except TypeError:
122 raise
122 raise
123
123
124 # batched call argument encoding
124 # batched call argument encoding
125
125
126 def escapebatcharg(plain):
126 def escapebatcharg(plain):
127 return (plain
127 return (plain
128 .replace(':', ':c')
128 .replace(':', ':c')
129 .replace(',', ':o')
129 .replace(',', ':o')
130 .replace(';', ':s')
130 .replace(';', ':s')
131 .replace('=', ':e'))
131 .replace('=', ':e'))
132
132
133 def unescapebatcharg(escaped):
133 def unescapebatcharg(escaped):
134 return (escaped
134 return (escaped
135 .replace(':e', '=')
135 .replace(':e', '=')
136 .replace(':s', ';')
136 .replace(':s', ';')
137 .replace(':o', ',')
137 .replace(':o', ',')
138 .replace(':c', ':'))
138 .replace(':c', ':'))
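As a quick illustration of the escaping scheme implemented by these two helpers (the value below is made up), the characters ``=``, ``,``, ``;`` and ``:`` are rewritten to two-character sequences and restored on the way back:

    value = 'key=1,2;3:x'
    escaped = escapebatcharg(value)        # 'key:e1:o2:s3:cx'
    assert unescapebatcharg(escaped) == value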
139
139
140 # mapping of options accepted by getbundle and their types
140 # mapping of options accepted by getbundle and their types
141 #
141 #
142 # Meant to be extended by extensions. It is the extension's responsibility to ensure
142 # Meant to be extended by extensions. It is the extension's responsibility to ensure
143 # such options are properly processed in exchange.getbundle.
143 # such options are properly processed in exchange.getbundle.
144 #
144 #
145 # supported types are:
145 # supported types are:
146 #
146 #
147 # :nodes: list of binary nodes
147 # :nodes: list of binary nodes
148 # :csv: list of comma-separated values
148 # :csv: list of comma-separated values
149 # :scsv: list of comma-separated values returned as a set
149 # :scsv: list of comma-separated values returned as a set
150 # :plain: string with no transformation needed.
150 # :plain: string with no transformation needed.
151 GETBUNDLE_ARGUMENTS = {
151 GETBUNDLE_ARGUMENTS = {
152 'heads': 'nodes',
152 'heads': 'nodes',
153 'bookmarks': 'boolean',
153 'bookmarks': 'boolean',
154 'common': 'nodes',
154 'common': 'nodes',
155 'obsmarkers': 'boolean',
155 'obsmarkers': 'boolean',
156 'phases': 'boolean',
156 'phases': 'boolean',
157 'bundlecaps': 'scsv',
157 'bundlecaps': 'scsv',
158 'listkeys': 'csv',
158 'listkeys': 'csv',
159 'cg': 'boolean',
159 'cg': 'boolean',
160 'cbattempted': 'boolean',
160 'cbattempted': 'boolean',
161 'stream': 'boolean',
161 'stream': 'boolean',
162 }
162 }
163
163
164 class baseprotocolhandler(interfaceutil.Interface):
164 class baseprotocolhandler(interfaceutil.Interface):
165 """Abstract base class for wire protocol handlers.
165 """Abstract base class for wire protocol handlers.
166
166
167 A wire protocol handler serves as an interface between protocol command
167 A wire protocol handler serves as an interface between protocol command
168 handlers and the wire protocol transport layer. Protocol handlers provide
168 handlers and the wire protocol transport layer. Protocol handlers provide
169 methods to read command arguments, redirect stdio for the duration of
169 methods to read command arguments, redirect stdio for the duration of
170 the request, handle response types, etc.
170 the request, handle response types, etc.
171 """
171 """
172
172
173 name = interfaceutil.Attribute(
173 name = interfaceutil.Attribute(
174 """The name of the protocol implementation.
174 """The name of the protocol implementation.
175
175
176 Used for uniquely identifying the transport type.
176 Used for uniquely identifying the transport type.
177 """)
177 """)
178
178
179 def getargs(args):
179 def getargs(args):
180 """return the value for arguments in <args>
180 """return the value for arguments in <args>
181
181
182 For version 1 transports, returns a list of values in the same
182 For version 1 transports, returns a list of values in the same
183 order they appear in ``args``. For version 2 transports, returns
183 order they appear in ``args``. For version 2 transports, returns
184 a dict mapping argument name to value.
184 a dict mapping argument name to value.
185 """
185 """
186
186
187 def getprotocaps():
187 def getprotocaps():
188 """Returns the list of protocol-level capabilities of client
188 """Returns the list of protocol-level capabilities of client
189
189
190 Returns a list of capabilities as declared by the client for
190 Returns a list of capabilities as declared by the client for
191 the current request (or connection for stateful protocol handlers)."""
191 the current request (or connection for stateful protocol handlers)."""
192
192
193 def getpayload():
193 def getpayload():
194 """Provide a generator for the raw payload.
194 """Provide a generator for the raw payload.
195
195
196 The caller is responsible for ensuring that the full payload is
196 The caller is responsible for ensuring that the full payload is
197 processed.
197 processed.
198 """
198 """
199
199
200 def mayberedirectstdio():
200 def mayberedirectstdio():
201 """Context manager to possibly redirect stdio.
201 """Context manager to possibly redirect stdio.
202
202
203 The context manager yields a file-object like object that receives
203 The context manager yields a file-object like object that receives
204 stdout and stderr output when the context manager is active. Or it
204 stdout and stderr output when the context manager is active. Or it
205 yields ``None`` if no I/O redirection occurs.
205 yields ``None`` if no I/O redirection occurs.
206
206
207 The intent of this context manager is to capture stdio output
207 The intent of this context manager is to capture stdio output
208 so it may be sent in the response. Some transports support streaming
208 so it may be sent in the response. Some transports support streaming
209 stdio to the client in real time. For these transports, stdio output
209 stdio to the client in real time. For these transports, stdio output
210 won't be captured.
210 won't be captured.
211 """
211 """
212
212
213 def client():
213 def client():
214 """Returns a string representation of this client (as bytes)."""
214 """Returns a string representation of this client (as bytes)."""
215
215
216 def addcapabilities(repo, caps):
216 def addcapabilities(repo, caps):
217 """Adds advertised capabilities specific to this protocol.
217 """Adds advertised capabilities specific to this protocol.
218
218
219 Receives the list of capabilities collected so far.
219 Receives the list of capabilities collected so far.
220
220
221 Returns a list of capabilities. The passed in argument can be returned.
221 Returns a list of capabilities. The passed in argument can be returned.
222 """
222 """
223
223
224 def checkperm(perm):
224 def checkperm(perm):
225 """Validate that the client has permissions to perform a request.
225 """Validate that the client has permissions to perform a request.
226
226
227 The argument is the permission required to proceed. If the client
227 The argument is the permission required to proceed. If the client
228 doesn't have that permission, the handler should raise an exception or abort
228 doesn't have that permission, the handler should raise an exception or abort
229 in a protocol specific manner.
229 in a protocol specific manner.
230 """
230 """
231
231
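For illustration, a toy handler satisfying this interface for a version-2-style transport could look like the sketch below. It is purely hypothetical: the class name, constructor, and pre-decoded argument dict are assumptions; only the method names and their documented semantics come from the interface above.

    import contextlib

    class toyprotocolhandler(object):
        name = 'toy-v2'

        def __init__(self, args):
            self._args = args            # already-decoded argument dict

        def getargs(self, args):
            # Version 2 style: return a dict mapping argument name to value.
            return {k: self._args[k] for k in args}

        def getprotocaps(self):
            return set()

        def getpayload(self):
            return iter([])

        @contextlib.contextmanager
        def mayberedirectstdio(self):
            yield None                   # this transport streams stdio; no capture

        def client(self):
            return b'toy-client'

        def addcapabilities(self, repo, caps):
            return caps

        def checkperm(self, perm):
            pass                         # raise/abort here when permission is denied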
232 class commandentry(object):
232 class commandentry(object):
233 """Represents a declared wire protocol command."""
233 """Represents a declared wire protocol command."""
234 def __init__(self, func, args='', transports=None,
234 def __init__(self, func, args='', transports=None,
235 permission='push'):
235 permission='push', cachekeyfn=None):
236 self.func = func
236 self.func = func
237 self.args = args
237 self.args = args
238 self.transports = transports or set()
238 self.transports = transports or set()
239 self.permission = permission
239 self.permission = permission
240 self.cachekeyfn = cachekeyfn
240
241
241 def _merge(self, func, args):
242 def _merge(self, func, args):
242 """Merge this instance with an incoming 2-tuple.
243 """Merge this instance with an incoming 2-tuple.
243
244
244 This is called when a caller using the old 2-tuple API attempts
245 This is called when a caller using the old 2-tuple API attempts
245 to replace an instance. The incoming values are merged with
246 to replace an instance. The incoming values are merged with
246 data not captured by the 2-tuple and a new instance containing
247 data not captured by the 2-tuple and a new instance containing
247 the union of the two objects is returned.
248 the union of the two objects is returned.
248 """
249 """
249 return commandentry(func, args=args, transports=set(self.transports),
250 return commandentry(func, args=args, transports=set(self.transports),
250 permission=self.permission)
251 permission=self.permission)
251
252
252 # Old code treats instances as 2-tuples. So expose that interface.
253 # Old code treats instances as 2-tuples. So expose that interface.
253 def __iter__(self):
254 def __iter__(self):
254 yield self.func
255 yield self.func
255 yield self.args
256 yield self.args
256
257
257 def __getitem__(self, i):
258 def __getitem__(self, i):
258 if i == 0:
259 if i == 0:
259 return self.func
260 return self.func
260 elif i == 1:
261 elif i == 1:
261 return self.args
262 return self.args
262 else:
263 else:
263 raise IndexError('can only access elements 0 and 1')
264 raise IndexError('can only access elements 0 and 1')
264
265
265 class commanddict(dict):
266 class commanddict(dict):
266 """Container for registered wire protocol commands.
267 """Container for registered wire protocol commands.
267
268
268 It behaves like a dict. But __setitem__ is overwritten to allow silent
269 It behaves like a dict. But __setitem__ is overwritten to allow silent
269 coercion of values from 2-tuples for API compatibility.
270 coercion of values from 2-tuples for API compatibility.
270 """
271 """
271 def __setitem__(self, k, v):
272 def __setitem__(self, k, v):
272 if isinstance(v, commandentry):
273 if isinstance(v, commandentry):
273 pass
274 pass
274 # Cast 2-tuples to commandentry instances.
275 # Cast 2-tuples to commandentry instances.
275 elif isinstance(v, tuple):
276 elif isinstance(v, tuple):
276 if len(v) != 2:
277 if len(v) != 2:
277 raise ValueError('command tuples must have exactly 2 elements')
278 raise ValueError('command tuples must have exactly 2 elements')
278
279
279 # It is common for extensions to wrap wire protocol commands via
280 # It is common for extensions to wrap wire protocol commands via
280 # e.g. ``wireproto.commands[x] = (newfn, args)``. Because callers
281 # e.g. ``wireproto.commands[x] = (newfn, args)``. Because callers
281 # doing this aren't aware of the new API that uses objects to store
282 # doing this aren't aware of the new API that uses objects to store
282 # command entries, we automatically merge old state with new.
283 # command entries, we automatically merge old state with new.
283 if k in self:
284 if k in self:
284 v = self[k]._merge(v[0], v[1])
285 v = self[k]._merge(v[0], v[1])
285 else:
286 else:
286 # Use default values from @wireprotocommand.
287 # Use default values from @wireprotocommand.
287 v = commandentry(v[0], args=v[1],
288 v = commandentry(v[0], args=v[1],
288 transports=set(TRANSPORTS),
289 transports=set(TRANSPORTS),
289 permission='push')
290 permission='push')
290 else:
291 else:
291 raise ValueError('command entries must be commandentry instances '
292 raise ValueError('command entries must be commandentry instances '
292 'or 2-tuples')
293 'or 2-tuples')
293
294
294 return super(commanddict, self).__setitem__(k, v)
295 return super(commanddict, self).__setitem__(k, v)
295
296
296 def commandavailable(self, command, proto):
297 def commandavailable(self, command, proto):
297 """Determine if a command is available for the requested protocol."""
298 """Determine if a command is available for the requested protocol."""
298 assert proto.name in TRANSPORTS
299 assert proto.name in TRANSPORTS
299
300
300 entry = self.get(command)
301 entry = self.get(command)
301
302
302 if not entry:
303 if not entry:
303 return False
304 return False
304
305
305 if proto.name not in entry.transports:
306 if proto.name not in entry.transports:
306 return False
307 return False
307
308
308 return True
309 return True
309
310
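The silent 2-tuple coercion performed by ``commanddict.__setitem__`` above exists to support the legacy extension pattern of reassigning a command as ``(func, argspec)``. A hedged sketch of that pattern follows; the ``headsfunc`` and ``wrappedheads`` names are made up for illustration.

    from mercurial import wireprototypes

    def headsfunc(repo, proto):              # stand-in command function
        return wireprototypes.bytesresponse(b'')

    commands = wireprototypes.commanddict()
    commands['heads'] = wireprototypes.commandentry(
        headsfunc, args='', transports=set(wireprototypes.TRANSPORTS),
        permission='pull')

    def wrappedheads(repo, proto):           # extension wrapper
        return headsfunc(repo, proto)

    # Old-style 2-tuple assignment: __setitem__ merges it with the existing
    # entry, preserving the transports set and the 'pull' permission.
    commands['heads'] = (wrappedheads, '')
    assert commands['heads'].func is wrappedheads
    assert commands['heads'].permission == 'pull'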
310 def supportedcompengines(ui, role):
311 def supportedcompengines(ui, role):
311 """Obtain the list of supported compression engines for a request."""
312 """Obtain the list of supported compression engines for a request."""
312 assert role in (util.CLIENTROLE, util.SERVERROLE)
313 assert role in (util.CLIENTROLE, util.SERVERROLE)
313
314
314 compengines = util.compengines.supportedwireengines(role)
315 compengines = util.compengines.supportedwireengines(role)
315
316
316 # Allow config to override default list and ordering.
317 # Allow config to override default list and ordering.
317 if role == util.SERVERROLE:
318 if role == util.SERVERROLE:
318 configengines = ui.configlist('server', 'compressionengines')
319 configengines = ui.configlist('server', 'compressionengines')
319 config = 'server.compressionengines'
320 config = 'server.compressionengines'
320 else:
321 else:
321 # This is currently implemented mainly to facilitate testing. In most
322 # This is currently implemented mainly to facilitate testing. In most
322 # cases, the server should be in charge of choosing a compression engine
323 # cases, the server should be in charge of choosing a compression engine
323 # because a server has the most to lose from a sub-optimal choice. (e.g.
324 # because a server has the most to lose from a sub-optimal choice. (e.g.
324 # CPU DoS due to an expensive engine or a network DoS due to poor
325 # CPU DoS due to an expensive engine or a network DoS due to poor
325 # compression ratio).
326 # compression ratio).
326 configengines = ui.configlist('experimental',
327 configengines = ui.configlist('experimental',
327 'clientcompressionengines')
328 'clientcompressionengines')
328 config = 'experimental.clientcompressionengines'
329 config = 'experimental.clientcompressionengines'
329
330
330 # No explicit config. Filter out the ones that aren't supposed to be
331 # No explicit config. Filter out the ones that aren't supposed to be
331 # advertised and return default ordering.
332 # advertised and return default ordering.
332 if not configengines:
333 if not configengines:
333 attr = 'serverpriority' if role == util.SERVERROLE else 'clientpriority'
334 attr = 'serverpriority' if role == util.SERVERROLE else 'clientpriority'
334 return [e for e in compengines
335 return [e for e in compengines
335 if getattr(e.wireprotosupport(), attr) > 0]
336 if getattr(e.wireprotosupport(), attr) > 0]
336
337
337 # If compression engines are listed in the config, assume there is a good
338 # If compression engines are listed in the config, assume there is a good
338 # reason for it (like server operators wanting to achieve specific
339 # reason for it (like server operators wanting to achieve specific
339 # performance characteristics). So fail fast if the config references
340 # performance characteristics). So fail fast if the config references
340 # unusable compression engines.
341 # unusable compression engines.
341 validnames = set(e.name() for e in compengines)
342 validnames = set(e.name() for e in compengines)
342 invalidnames = set(e for e in configengines if e not in validnames)
343 invalidnames = set(e for e in configengines if e not in validnames)
343 if invalidnames:
344 if invalidnames:
344 raise error.Abort(_('invalid compression engine defined in %s: %s') %
345 raise error.Abort(_('invalid compression engine defined in %s: %s') %
345 (config, ', '.join(sorted(invalidnames))))
346 (config, ', '.join(sorted(invalidnames))))
346
347
347 compengines = [e for e in compengines if e.name() in configengines]
348 compengines = [e for e in compengines if e.name() in configengines]
348 compengines = sorted(compengines,
349 compengines = sorted(compengines,
349 key=lambda e: configengines.index(e.name()))
350 key=lambda e: configengines.index(e.name()))
350
351
351 if not compengines:
352 if not compengines:
352 raise error.Abort(_('%s config option does not specify any known '
353 raise error.Abort(_('%s config option does not specify any known '
353 'compression engines') % config,
354 'compression engines') % config,
354 hint=_('usable compression engines: %s') %
355 hint=_('usable compression engines: %s') %
355 ', '.join(sorted(validnames)))
356 ', '.join(sorted(validnames)))
356
357
357 return compengines
358 return compengines
358
359
359 @attr.s
360 @attr.s
360 class encodedresponse(object):
361 class encodedresponse(object):
361 """Represents response data that is already content encoded.
362 """Represents response data that is already content encoded.
362
363
363 Wire protocol version 2 only.
364 Wire protocol version 2 only.
364
365
365 Commands typically emit Python objects that are encoded and sent over the
366 Commands typically emit Python objects that are encoded and sent over the
366 wire. If commands emit an object of this type, the encoding step is bypassed
367 wire. If commands emit an object of this type, the encoding step is bypassed
367 and the content from this object is used instead.
368 and the content from this object is used instead.
368 """
369 """
369 data = attr.ib()
370 data = attr.ib()
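A command (or a cacher) can hand such pre-encoded chunks straight to the output layer. A minimal hedged sketch, where the generator name and the origin of the chunks are assumptions:

    def replayencoded(chunks):
        # chunks: bytes previously produced by the response encoder, e.g. read
        # back from a cache. Wrapping them in encodedresponse tells the output
        # layer not to encode them a second time.
        for chunk in chunks:
            yield encodedresponse(chunk)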
@@ -1,970 +1,1129
1 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
1 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
2 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
2 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 #
3 #
4 # This software may be used and distributed according to the terms of the
4 # This software may be used and distributed according to the terms of the
5 # GNU General Public License version 2 or any later version.
5 # GNU General Public License version 2 or any later version.
6
6
7 from __future__ import absolute_import
7 from __future__ import absolute_import
8
8
9 import contextlib
9 import contextlib
10 import hashlib
10
11
11 from .i18n import _
12 from .i18n import _
12 from .node import (
13 from .node import (
13 hex,
14 hex,
14 nullid,
15 nullid,
15 )
16 )
16 from . import (
17 from . import (
17 discovery,
18 discovery,
18 encoding,
19 encoding,
19 error,
20 error,
20 narrowspec,
21 narrowspec,
21 pycompat,
22 pycompat,
22 streamclone,
23 streamclone,
23 util,
24 util,
24 wireprotoframing,
25 wireprotoframing,
25 wireprototypes,
26 wireprototypes,
26 )
27 )
27 from .utils import (
28 from .utils import (
29 cborutil,
28 interfaceutil,
30 interfaceutil,
29 stringutil,
31 stringutil,
30 )
32 )
31
33
32 FRAMINGTYPE = b'application/mercurial-exp-framing-0005'
34 FRAMINGTYPE = b'application/mercurial-exp-framing-0005'
33
35
34 HTTP_WIREPROTO_V2 = wireprototypes.HTTP_WIREPROTO_V2
36 HTTP_WIREPROTO_V2 = wireprototypes.HTTP_WIREPROTO_V2
35
37
36 COMMANDS = wireprototypes.commanddict()
38 COMMANDS = wireprototypes.commanddict()
37
39
40 # Value inserted into cache key computation function. Change the value to
41 # force new cache keys for every command request. This should be done when
42 # there is a change to how caching works, etc.
43 GLOBAL_CACHE_VERSION = 1
44
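As the comment above notes, this value is mixed into cache key computation so that bumping it invalidates all existing keys. A simplified, hypothetical sketch of that idea, using the ``hashlib`` and ``cborutil`` imports added in this change (the real key derivation is more involved and its exact state layout is not shown here):

    def examplecachekey(command, argsstate):
        # CBOR-encode a deterministic state structure and feed it into a hasher.
        # Bumping GLOBAL_CACHE_VERSION changes the state, and therefore every key.
        state = {
            b'version': GLOBAL_CACHE_VERSION,
            b'command': command,
            b'args': argsstate,
        }
        hasher = hashlib.sha1()
        for chunk in cborutil.streamencode(state):
            hasher.update(chunk)
        return hasher.hexdigest()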
38 def handlehttpv2request(rctx, req, res, checkperm, urlparts):
45 def handlehttpv2request(rctx, req, res, checkperm, urlparts):
39 from .hgweb import common as hgwebcommon
46 from .hgweb import common as hgwebcommon
40
47
41 # URL space looks like: <permissions>/<command>, where <permission> can
48 # URL space looks like: <permissions>/<command>, where <permission> can
42 # be ``ro`` or ``rw`` to signal read-only or read-write, respectively.
49 # be ``ro`` or ``rw`` to signal read-only or read-write, respectively.
43
50
44 # Root URL does nothing meaningful... yet.
51 # Root URL does nothing meaningful... yet.
45 if not urlparts:
52 if not urlparts:
46 res.status = b'200 OK'
53 res.status = b'200 OK'
47 res.headers[b'Content-Type'] = b'text/plain'
54 res.headers[b'Content-Type'] = b'text/plain'
48 res.setbodybytes(_('HTTP version 2 API handler'))
55 res.setbodybytes(_('HTTP version 2 API handler'))
49 return
56 return
50
57
51 if len(urlparts) == 1:
58 if len(urlparts) == 1:
52 res.status = b'404 Not Found'
59 res.status = b'404 Not Found'
53 res.headers[b'Content-Type'] = b'text/plain'
60 res.headers[b'Content-Type'] = b'text/plain'
54 res.setbodybytes(_('do not know how to process %s\n') %
61 res.setbodybytes(_('do not know how to process %s\n') %
55 req.dispatchpath)
62 req.dispatchpath)
56 return
63 return
57
64
58 permission, command = urlparts[0:2]
65 permission, command = urlparts[0:2]
59
66
60 if permission not in (b'ro', b'rw'):
67 if permission not in (b'ro', b'rw'):
61 res.status = b'404 Not Found'
68 res.status = b'404 Not Found'
62 res.headers[b'Content-Type'] = b'text/plain'
69 res.headers[b'Content-Type'] = b'text/plain'
63 res.setbodybytes(_('unknown permission: %s') % permission)
70 res.setbodybytes(_('unknown permission: %s') % permission)
64 return
71 return
65
72
66 if req.method != 'POST':
73 if req.method != 'POST':
67 res.status = b'405 Method Not Allowed'
74 res.status = b'405 Method Not Allowed'
68 res.headers[b'Allow'] = b'POST'
75 res.headers[b'Allow'] = b'POST'
69 res.setbodybytes(_('commands require POST requests'))
76 res.setbodybytes(_('commands require POST requests'))
70 return
77 return
71
78
72 # At some point we'll want to use our own API instead of recycling the
79 # At some point we'll want to use our own API instead of recycling the
73 # behavior of version 1 of the wire protocol...
80 # behavior of version 1 of the wire protocol...
74 # TODO return reasonable responses - not responses that overload the
81 # TODO return reasonable responses - not responses that overload the
75 # HTTP status line message for error reporting.
82 # HTTP status line message for error reporting.
76 try:
83 try:
77 checkperm(rctx, req, 'pull' if permission == b'ro' else 'push')
84 checkperm(rctx, req, 'pull' if permission == b'ro' else 'push')
78 except hgwebcommon.ErrorResponse as e:
85 except hgwebcommon.ErrorResponse as e:
79 res.status = hgwebcommon.statusmessage(e.code, pycompat.bytestr(e))
86 res.status = hgwebcommon.statusmessage(e.code, pycompat.bytestr(e))
80 for k, v in e.headers:
87 for k, v in e.headers:
81 res.headers[k] = v
88 res.headers[k] = v
82 res.setbodybytes('permission denied')
89 res.setbodybytes('permission denied')
83 return
90 return
84
91
85 # We have a special endpoint to reflect the request back at the client.
92 # We have a special endpoint to reflect the request back at the client.
86 if command == b'debugreflect':
93 if command == b'debugreflect':
87 _processhttpv2reflectrequest(rctx.repo.ui, rctx.repo, req, res)
94 _processhttpv2reflectrequest(rctx.repo.ui, rctx.repo, req, res)
88 return
95 return
89
96
90 # Extra commands that we handle that aren't really wire protocol
97 # Extra commands that we handle that aren't really wire protocol
91 # commands. Think extra hard before making this hackery available to
98 # commands. Think extra hard before making this hackery available to
92 # extension.
99 # extension.
93 extracommands = {'multirequest'}
100 extracommands = {'multirequest'}
94
101
95 if command not in COMMANDS and command not in extracommands:
102 if command not in COMMANDS and command not in extracommands:
96 res.status = b'404 Not Found'
103 res.status = b'404 Not Found'
97 res.headers[b'Content-Type'] = b'text/plain'
104 res.headers[b'Content-Type'] = b'text/plain'
98 res.setbodybytes(_('unknown wire protocol command: %s\n') % command)
105 res.setbodybytes(_('unknown wire protocol command: %s\n') % command)
99 return
106 return
100
107
101 repo = rctx.repo
108 repo = rctx.repo
102 ui = repo.ui
109 ui = repo.ui
103
110
104 proto = httpv2protocolhandler(req, ui)
111 proto = httpv2protocolhandler(req, ui)
105
112
106 if (not COMMANDS.commandavailable(command, proto)
113 if (not COMMANDS.commandavailable(command, proto)
107 and command not in extracommands):
114 and command not in extracommands):
108 res.status = b'404 Not Found'
115 res.status = b'404 Not Found'
109 res.headers[b'Content-Type'] = b'text/plain'
116 res.headers[b'Content-Type'] = b'text/plain'
110 res.setbodybytes(_('invalid wire protocol command: %s') % command)
117 res.setbodybytes(_('invalid wire protocol command: %s') % command)
111 return
118 return
112
119
113 # TODO consider cases where proxies may add additional Accept headers.
120 # TODO consider cases where proxies may add additional Accept headers.
114 if req.headers.get(b'Accept') != FRAMINGTYPE:
121 if req.headers.get(b'Accept') != FRAMINGTYPE:
115 res.status = b'406 Not Acceptable'
122 res.status = b'406 Not Acceptable'
116 res.headers[b'Content-Type'] = b'text/plain'
123 res.headers[b'Content-Type'] = b'text/plain'
117 res.setbodybytes(_('client MUST specify Accept header with value: %s\n')
124 res.setbodybytes(_('client MUST specify Accept header with value: %s\n')
118 % FRAMINGTYPE)
125 % FRAMINGTYPE)
119 return
126 return
120
127
121 if req.headers.get(b'Content-Type') != FRAMINGTYPE:
128 if req.headers.get(b'Content-Type') != FRAMINGTYPE:
122 res.status = b'415 Unsupported Media Type'
129 res.status = b'415 Unsupported Media Type'
123 # TODO we should send a response with appropriate media type,
130 # TODO we should send a response with appropriate media type,
124 # since client does Accept it.
131 # since client does Accept it.
125 res.headers[b'Content-Type'] = b'text/plain'
132 res.headers[b'Content-Type'] = b'text/plain'
126 res.setbodybytes(_('client MUST send Content-Type header with '
133 res.setbodybytes(_('client MUST send Content-Type header with '
127 'value: %s\n') % FRAMINGTYPE)
134 'value: %s\n') % FRAMINGTYPE)
128 return
135 return
129
136
130 _processhttpv2request(ui, repo, req, res, permission, command, proto)
137 _processhttpv2request(ui, repo, req, res, permission, command, proto)
131
138
132 def _processhttpv2reflectrequest(ui, repo, req, res):
139 def _processhttpv2reflectrequest(ui, repo, req, res):
133 """Reads unified frame protocol request and dumps out state to client.
140 """Reads unified frame protocol request and dumps out state to client.
134
141
135 This special endpoint can be used to help debug the wire protocol.
142 This special endpoint can be used to help debug the wire protocol.
136
143
137 Instead of routing the request through the normal dispatch mechanism,
144 Instead of routing the request through the normal dispatch mechanism,
138 we instead read all frames, decode them, and feed them into our state
145 we instead read all frames, decode them, and feed them into our state
139 tracker. We then dump the log of all that activity back out to the
146 tracker. We then dump the log of all that activity back out to the
140 client.
147 client.
141 """
148 """
142 import json
149 import json
143
150
144 # Reflection APIs have a history of being abused, accidentally disclosing
151 # Reflection APIs have a history of being abused, accidentally disclosing
145 # sensitive data, etc. So we have a config knob.
152 # sensitive data, etc. So we have a config knob.
146 if not ui.configbool('experimental', 'web.api.debugreflect'):
153 if not ui.configbool('experimental', 'web.api.debugreflect'):
147 res.status = b'404 Not Found'
154 res.status = b'404 Not Found'
148 res.headers[b'Content-Type'] = b'text/plain'
155 res.headers[b'Content-Type'] = b'text/plain'
149 res.setbodybytes(_('debugreflect service not available'))
156 res.setbodybytes(_('debugreflect service not available'))
150 return
157 return
151
158
152 # We assume we have a unified framing protocol request body.
159 # We assume we have a unified framing protocol request body.
153
160
154 reactor = wireprotoframing.serverreactor()
161 reactor = wireprotoframing.serverreactor()
155 states = []
162 states = []
156
163
157 while True:
164 while True:
158 frame = wireprotoframing.readframe(req.bodyfh)
165 frame = wireprotoframing.readframe(req.bodyfh)
159
166
160 if not frame:
167 if not frame:
161 states.append(b'received: <no frame>')
168 states.append(b'received: <no frame>')
162 break
169 break
163
170
164 states.append(b'received: %d %d %d %s' % (frame.typeid, frame.flags,
171 states.append(b'received: %d %d %d %s' % (frame.typeid, frame.flags,
165 frame.requestid,
172 frame.requestid,
166 frame.payload))
173 frame.payload))
167
174
168 action, meta = reactor.onframerecv(frame)
175 action, meta = reactor.onframerecv(frame)
169 states.append(json.dumps((action, meta), sort_keys=True,
176 states.append(json.dumps((action, meta), sort_keys=True,
170 separators=(', ', ': ')))
177 separators=(', ', ': ')))
171
178
172 action, meta = reactor.oninputeof()
179 action, meta = reactor.oninputeof()
173 meta['action'] = action
180 meta['action'] = action
174 states.append(json.dumps(meta, sort_keys=True, separators=(', ',': ')))
181 states.append(json.dumps(meta, sort_keys=True, separators=(', ',': ')))
175
182
176 res.status = b'200 OK'
183 res.status = b'200 OK'
177 res.headers[b'Content-Type'] = b'text/plain'
184 res.headers[b'Content-Type'] = b'text/plain'
178 res.setbodybytes(b'\n'.join(states))
185 res.setbodybytes(b'\n'.join(states))
179
186
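Because reflection endpoints are easy to abuse, the handler above refuses to run unless the config knob it checks is enabled. A minimal way to switch it on for a test server, assuming the usual hgrc syntax (the option name comes straight from the configbool() call above):

    [experimental]
    web.api.debugreflect = true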
180 def _processhttpv2request(ui, repo, req, res, authedperm, reqcommand, proto):
187 def _processhttpv2request(ui, repo, req, res, authedperm, reqcommand, proto):
181 """Post-validation handler for HTTPv2 requests.
188 """Post-validation handler for HTTPv2 requests.
182
189
183 Called when the HTTP request contains unified frame-based protocol
190 Called when the HTTP request contains unified frame-based protocol
184 frames for evaluation.
191 frames for evaluation.
185 """
192 """
186 # TODO Some HTTP clients are full duplex and can receive data before
193 # TODO Some HTTP clients are full duplex and can receive data before
187 # the entire request is transmitted. Figure out a way to indicate support
194 # the entire request is transmitted. Figure out a way to indicate support
188 # for that so we can opt into full duplex mode.
195 # for that so we can opt into full duplex mode.
189 reactor = wireprotoframing.serverreactor(deferoutput=True)
196 reactor = wireprotoframing.serverreactor(deferoutput=True)
190 seencommand = False
197 seencommand = False
191
198
192 outstream = reactor.makeoutputstream()
199 outstream = reactor.makeoutputstream()
193
200
194 while True:
201 while True:
195 frame = wireprotoframing.readframe(req.bodyfh)
202 frame = wireprotoframing.readframe(req.bodyfh)
196 if not frame:
203 if not frame:
197 break
204 break
198
205
199 action, meta = reactor.onframerecv(frame)
206 action, meta = reactor.onframerecv(frame)
200
207
201 if action == 'wantframe':
208 if action == 'wantframe':
202 # Need more data before we can do anything.
209 # Need more data before we can do anything.
203 continue
210 continue
204 elif action == 'runcommand':
211 elif action == 'runcommand':
205 sentoutput = _httpv2runcommand(ui, repo, req, res, authedperm,
212 sentoutput = _httpv2runcommand(ui, repo, req, res, authedperm,
206 reqcommand, reactor, outstream,
213 reqcommand, reactor, outstream,
207 meta, issubsequent=seencommand)
214 meta, issubsequent=seencommand)
208
215
209 if sentoutput:
216 if sentoutput:
210 return
217 return
211
218
212 seencommand = True
219 seencommand = True
213
220
214 elif action == 'error':
221 elif action == 'error':
215 # TODO define proper error mechanism.
222 # TODO define proper error mechanism.
216 res.status = b'200 OK'
223 res.status = b'200 OK'
217 res.headers[b'Content-Type'] = b'text/plain'
224 res.headers[b'Content-Type'] = b'text/plain'
218 res.setbodybytes(meta['message'] + b'\n')
225 res.setbodybytes(meta['message'] + b'\n')
219 return
226 return
220 else:
227 else:
221 raise error.ProgrammingError(
228 raise error.ProgrammingError(
222 'unhandled action from frame processor: %s' % action)
229 'unhandled action from frame processor: %s' % action)
223
230
224 action, meta = reactor.oninputeof()
231 action, meta = reactor.oninputeof()
225 if action == 'sendframes':
232 if action == 'sendframes':
226 # We assume we haven't started sending the response yet. If we're
233 # We assume we haven't started sending the response yet. If we're
227 # wrong, the response type will raise an exception.
234 # wrong, the response type will raise an exception.
228 res.status = b'200 OK'
235 res.status = b'200 OK'
229 res.headers[b'Content-Type'] = FRAMINGTYPE
236 res.headers[b'Content-Type'] = FRAMINGTYPE
230 res.setbodygen(meta['framegen'])
237 res.setbodygen(meta['framegen'])
231 elif action == 'noop':
238 elif action == 'noop':
232 pass
239 pass
233 else:
240 else:
234 raise error.ProgrammingError('unhandled action from frame processor: %s'
241 raise error.ProgrammingError('unhandled action from frame processor: %s'
235 % action)
242 % action)
236
243
237 def _httpv2runcommand(ui, repo, req, res, authedperm, reqcommand, reactor,
244 def _httpv2runcommand(ui, repo, req, res, authedperm, reqcommand, reactor,
238 outstream, command, issubsequent):
245 outstream, command, issubsequent):
239 """Dispatch a wire protocol command made from HTTPv2 requests.
246 """Dispatch a wire protocol command made from HTTPv2 requests.
240
247
241 The authenticated permission (``authedperm``) along with the original
248 The authenticated permission (``authedperm``) along with the original
242 command from the URL (``reqcommand``) are passed in.
249 command from the URL (``reqcommand``) are passed in.
243 """
250 """
244 # We already validated that the session has permissions to perform the
251 # We already validated that the session has permissions to perform the
245 # actions in ``authedperm``. In the unified frame protocol, the canonical
252 # actions in ``authedperm``. In the unified frame protocol, the canonical
246 # command to run is expressed in a frame. However, the URL also requested
253 # command to run is expressed in a frame. However, the URL also requested
247 # to run a specific command. We need to be careful that the command we
254 # to run a specific command. We need to be careful that the command we
248 # run doesn't have permissions requirements greater than what was granted
255 # run doesn't have permissions requirements greater than what was granted
249 # by ``authedperm``.
256 # by ``authedperm``.
250 #
257 #
251 # Our rule for this is we only allow one command per HTTP request and
258 # Our rule for this is we only allow one command per HTTP request and
252 # that command must match the command in the URL. However, we make
259 # that command must match the command in the URL. However, we make
253 # an exception for the ``multirequest`` URL. This URL is allowed to
260 # an exception for the ``multirequest`` URL. This URL is allowed to
254 # execute multiple commands. We double check permissions of each command
261 # execute multiple commands. We double check permissions of each command
255 # as it is invoked to ensure there is no privilege escalation.
262 # as it is invoked to ensure there is no privilege escalation.
256 # TODO consider allowing multiple commands to regular command URLs
263 # TODO consider allowing multiple commands to regular command URLs
257 # iff each command is the same.
264 # iff each command is the same.
258
265
259 proto = httpv2protocolhandler(req, ui, args=command['args'])
266 proto = httpv2protocolhandler(req, ui, args=command['args'])
260
267
261 if reqcommand == b'multirequest':
268 if reqcommand == b'multirequest':
262 if not COMMANDS.commandavailable(command['command'], proto):
269 if not COMMANDS.commandavailable(command['command'], proto):
263 # TODO proper error mechanism
270 # TODO proper error mechanism
264 res.status = b'200 OK'
271 res.status = b'200 OK'
265 res.headers[b'Content-Type'] = b'text/plain'
272 res.headers[b'Content-Type'] = b'text/plain'
266 res.setbodybytes(_('wire protocol command not available: %s') %
273 res.setbodybytes(_('wire protocol command not available: %s') %
267 command['command'])
274 command['command'])
268 return True
275 return True
269
276
270 # TODO don't use assert here, since it may be elided by -O.
277 # TODO don't use assert here, since it may be elided by -O.
271 assert authedperm in (b'ro', b'rw')
278 assert authedperm in (b'ro', b'rw')
272 wirecommand = COMMANDS[command['command']]
279 wirecommand = COMMANDS[command['command']]
273 assert wirecommand.permission in ('push', 'pull')
280 assert wirecommand.permission in ('push', 'pull')
274
281
275 if authedperm == b'ro' and wirecommand.permission != 'pull':
282 if authedperm == b'ro' and wirecommand.permission != 'pull':
276 # TODO proper error mechanism
283 # TODO proper error mechanism
277 res.status = b'403 Forbidden'
284 res.status = b'403 Forbidden'
278 res.headers[b'Content-Type'] = b'text/plain'
285 res.headers[b'Content-Type'] = b'text/plain'
279 res.setbodybytes(_('insufficient permissions to execute '
286 res.setbodybytes(_('insufficient permissions to execute '
280 'command: %s') % command['command'])
287 'command: %s') % command['command'])
281 return True
288 return True
282
289
283 # TODO should we also call checkperm() here? Maybe not if we're going
290 # TODO should we also call checkperm() here? Maybe not if we're going
284 # to overhaul that API. The granted scope from the URL check should
291 # to overhaul that API. The granted scope from the URL check should
285 # be good enough.
292 # be good enough.
286
293
287 else:
294 else:
288 # Don't allow multiple commands outside of ``multirequest`` URL.
295 # Don't allow multiple commands outside of ``multirequest`` URL.
289 if issubsequent:
296 if issubsequent:
290 # TODO proper error mechanism
297 # TODO proper error mechanism
291 res.status = b'200 OK'
298 res.status = b'200 OK'
292 res.headers[b'Content-Type'] = b'text/plain'
299 res.headers[b'Content-Type'] = b'text/plain'
293 res.setbodybytes(_('multiple commands cannot be issued to this '
300 res.setbodybytes(_('multiple commands cannot be issued to this '
294 'URL'))
301 'URL'))
295 return True
302 return True
296
303
297 if reqcommand != command['command']:
304 if reqcommand != command['command']:
298 # TODO define proper error mechanism
305 # TODO define proper error mechanism
299 res.status = b'200 OK'
306 res.status = b'200 OK'
300 res.headers[b'Content-Type'] = b'text/plain'
307 res.headers[b'Content-Type'] = b'text/plain'
301 res.setbodybytes(_('command in frame must match command in URL'))
308 res.setbodybytes(_('command in frame must match command in URL'))
302 return True
309 return True
303
310
304 res.status = b'200 OK'
311 res.status = b'200 OK'
305 res.headers[b'Content-Type'] = FRAMINGTYPE
312 res.headers[b'Content-Type'] = FRAMINGTYPE
306
313
307 try:
314 try:
308 objs = dispatch(repo, proto, command['command'])
315 objs = dispatch(repo, proto, command['command'])
309
316
310 action, meta = reactor.oncommandresponsereadyobjects(
317 action, meta = reactor.oncommandresponsereadyobjects(
311 outstream, command['requestid'], objs)
318 outstream, command['requestid'], objs)
312
319
313 except error.WireprotoCommandError as e:
320 except error.WireprotoCommandError as e:
314 action, meta = reactor.oncommanderror(
321 action, meta = reactor.oncommanderror(
315 outstream, command['requestid'], e.message, e.messageargs)
322 outstream, command['requestid'], e.message, e.messageargs)
316
323
317 except Exception as e:
324 except Exception as e:
318 action, meta = reactor.onservererror(
325 action, meta = reactor.onservererror(
319 outstream, command['requestid'],
326 outstream, command['requestid'],
320 _('exception when invoking command: %s') %
327 _('exception when invoking command: %s') %
321 stringutil.forcebytestr(e))
328 stringutil.forcebytestr(e))
322
329
323 if action == 'sendframes':
330 if action == 'sendframes':
324 res.setbodygen(meta['framegen'])
331 res.setbodygen(meta['framegen'])
325 return True
332 return True
326 elif action == 'noop':
333 elif action == 'noop':
327 return False
334 return False
328 else:
335 else:
329 raise error.ProgrammingError('unhandled event from reactor: %s' %
336 raise error.ProgrammingError('unhandled event from reactor: %s' %
330 action)
337 action)
331
338
332 def getdispatchrepo(repo, proto, command):
339 def getdispatchrepo(repo, proto, command):
333 return repo.filtered('served')
340 return repo.filtered('served')
334
341
335 def dispatch(repo, proto, command):
342 def dispatch(repo, proto, command):
343 """Run a wire protocol command.
344
345 Returns an iterable of objects that will be sent to the client.
346 """
336 repo = getdispatchrepo(repo, proto, command)
347 repo = getdispatchrepo(repo, proto, command)
337
348
338 func, spec = COMMANDS[command]
349 entry = COMMANDS[command]
350 func = entry.func
351 spec = entry.args
352
339 args = proto.getargs(spec)
353 args = proto.getargs(spec)
340
354
341 return func(repo, proto, **pycompat.strkwargs(args))
355 # There is some duplicate boilerplate code here for calling the command and
356 # emitting objects. It is either that or a lot of indented code that looks
357 # like a pyramid (since there are a lot of code paths that result in not
358 # using the cacher).
359 callcommand = lambda: func(repo, proto, **pycompat.strkwargs(args))
360
361 # Request is not cacheable. Don't bother instantiating a cacher.
362 if not entry.cachekeyfn:
363 for o in callcommand():
364 yield o
365 return
366
367 cacher = makeresponsecacher(repo, proto, command, args,
368 cborutil.streamencode)
369
370 # The factory declined to provide a cacher. Do default handling.
371 if not cacher:
372 for o in callcommand():
373 yield o
374 return
375
376 with cacher:
377 cachekey = entry.cachekeyfn(repo, proto, cacher, **args)
378
379 # No cache key or the cacher doesn't like it. Do default handling.
380 if cachekey is None or not cacher.setcachekey(cachekey):
381 for o in callcommand():
382 yield o
383 return
384
385 # Serve it from the cache, if possible.
386 cached = cacher.lookup()
387
388 if cached:
389 for o in cached['objs']:
390 yield o
391 return
392
393 # Else call the command and feed its output into the cacher, allowing
394 # the cacher to buffer/mutate objects as it desires.
395 for o in callcommand():
396 for o in cacher.onobject(o):
397 yield o
398
399 for o in cacher.onfinished():
400 yield o
342
401
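dispatch() only ever drives a cacher through a small surface: it is used as a context manager, asked to adjust and accept a cache key, probed with lookup(), and fed objects via onobject()/onfinished(). The no-op sketch below is purely illustrative of that surface as read from the calls above; it is an assumption for reading convenience, not the actual iwireprotocolcommandcacher contract.

    # Illustrative no-op cacher mirroring the calls made by dispatch() above.
    class noopcommandcacher(object):
        def __enter__(self):
            return self

        def __exit__(self, exctype, excvalue, exctb):
            pass

        def adjustcachekeystate(self, state):
            # Chance to mix backend-specific data into the cache key state.
            pass

        def setcachekey(self, key):
            # Returning a falsy value makes dispatch() skip caching entirely.
            return True

        def lookup(self):
            # None means cache miss; a hit would return {'objs': [...]}.
            return None

        def onobject(self, obj):
            # Pass objects straight through; a real backend could buffer them.
            yield obj

        def onfinished(self):
            # Emit any trailing objects after the command has finished.
            return []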
343 @interfaceutil.implementer(wireprototypes.baseprotocolhandler)
402 @interfaceutil.implementer(wireprototypes.baseprotocolhandler)
344 class httpv2protocolhandler(object):
403 class httpv2protocolhandler(object):
345 def __init__(self, req, ui, args=None):
404 def __init__(self, req, ui, args=None):
346 self._req = req
405 self._req = req
347 self._ui = ui
406 self._ui = ui
348 self._args = args
407 self._args = args
349
408
350 @property
409 @property
351 def name(self):
410 def name(self):
352 return HTTP_WIREPROTO_V2
411 return HTTP_WIREPROTO_V2
353
412
354 def getargs(self, args):
413 def getargs(self, args):
355 # First look for args that were passed but aren't registered on this
414 # First look for args that were passed but aren't registered on this
356 # command.
415 # command.
357 extra = set(self._args) - set(args)
416 extra = set(self._args) - set(args)
358 if extra:
417 if extra:
359 raise error.WireprotoCommandError(
418 raise error.WireprotoCommandError(
360 'unsupported argument to command: %s' %
419 'unsupported argument to command: %s' %
361 ', '.join(sorted(extra)))
420 ', '.join(sorted(extra)))
362
421
363 # And look for required arguments that are missing.
422 # And look for required arguments that are missing.
364 missing = {a for a in args if args[a]['required']} - set(self._args)
423 missing = {a for a in args if args[a]['required']} - set(self._args)
365
424
366 if missing:
425 if missing:
367 raise error.WireprotoCommandError(
426 raise error.WireprotoCommandError(
368 'missing required arguments: %s' % ', '.join(sorted(missing)))
427 'missing required arguments: %s' % ', '.join(sorted(missing)))
369
428
370 # Now derive the arguments to pass to the command, taking into
429 # Now derive the arguments to pass to the command, taking into
371 # account the arguments specified by the client.
430 # account the arguments specified by the client.
372 data = {}
431 data = {}
373 for k, meta in sorted(args.items()):
432 for k, meta in sorted(args.items()):
374 # This argument wasn't passed by the client.
433 # This argument wasn't passed by the client.
375 if k not in self._args:
434 if k not in self._args:
376 data[k] = meta['default']()
435 data[k] = meta['default']()
377 continue
436 continue
378
437
379 v = self._args[k]
438 v = self._args[k]
380
439
381 # Sets may be expressed as lists. Silently normalize.
440 # Sets may be expressed as lists. Silently normalize.
382 if meta['type'] == 'set' and isinstance(v, list):
441 if meta['type'] == 'set' and isinstance(v, list):
383 v = set(v)
442 v = set(v)
384
443
385 # TODO consider more/stronger type validation.
444 # TODO consider more/stronger type validation.
386
445
387 data[k] = v
446 data[k] = v
388
447
389 return data
448 return data
390
449
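As a concrete illustration of the normalization performed by getargs() (the spec, argument names, and values here are made up for the example):

    spec = {
        'nodes': {'type': 'list', 'required': True,
                  'default': lambda: None, 'validvalues': None},
        'fields': {'type': 'set', 'required': False,
                   'default': set, 'validvalues': {b'parents'}},
    }
    clientargs = {'nodes': [b'abc'], 'fields': [b'parents']}
    # getargs(spec) would return {'nodes': [b'abc'], 'fields': {b'parents'}}:
    # the list sent for the set-typed 'fields' is silently normalized to a set.
    # Omitting 'fields' yields its default (set()); omitting 'nodes' or sending
    # an unknown key raises WireprotoCommandError.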
391 def getprotocaps(self):
450 def getprotocaps(self):
392 # Protocol capabilities are currently not implemented for HTTP V2.
451 # Protocol capabilities are currently not implemented for HTTP V2.
393 return set()
452 return set()
394
453
395 def getpayload(self):
454 def getpayload(self):
396 raise NotImplementedError
455 raise NotImplementedError
397
456
398 @contextlib.contextmanager
457 @contextlib.contextmanager
399 def mayberedirectstdio(self):
458 def mayberedirectstdio(self):
400 raise NotImplementedError
459 raise NotImplementedError
401
460
402 def client(self):
461 def client(self):
403 raise NotImplementedError
462 raise NotImplementedError
404
463
405 def addcapabilities(self, repo, caps):
464 def addcapabilities(self, repo, caps):
406 return caps
465 return caps
407
466
408 def checkperm(self, perm):
467 def checkperm(self, perm):
409 raise NotImplementedError
468 raise NotImplementedError
410
469
411 def httpv2apidescriptor(req, repo):
470 def httpv2apidescriptor(req, repo):
412 proto = httpv2protocolhandler(req, repo.ui)
471 proto = httpv2protocolhandler(req, repo.ui)
413
472
414 return _capabilitiesv2(repo, proto)
473 return _capabilitiesv2(repo, proto)
415
474
416 def _capabilitiesv2(repo, proto):
475 def _capabilitiesv2(repo, proto):
417 """Obtain the set of capabilities for version 2 transports.
476 """Obtain the set of capabilities for version 2 transports.
418
477
419 These capabilities are distinct from the capabilities for version 1
478 These capabilities are distinct from the capabilities for version 1
420 transports.
479 transports.
421 """
480 """
422 compression = []
481 compression = []
423 for engine in wireprototypes.supportedcompengines(repo.ui, util.SERVERROLE):
482 for engine in wireprototypes.supportedcompengines(repo.ui, util.SERVERROLE):
424 compression.append({
483 compression.append({
425 b'name': engine.wireprotosupport().name,
484 b'name': engine.wireprotosupport().name,
426 })
485 })
427
486
428 caps = {
487 caps = {
429 'commands': {},
488 'commands': {},
430 'compression': compression,
489 'compression': compression,
431 'framingmediatypes': [FRAMINGTYPE],
490 'framingmediatypes': [FRAMINGTYPE],
432 'pathfilterprefixes': set(narrowspec.VALID_PREFIXES),
491 'pathfilterprefixes': set(narrowspec.VALID_PREFIXES),
433 }
492 }
434
493
435 for command, entry in COMMANDS.items():
494 for command, entry in COMMANDS.items():
436 args = {}
495 args = {}
437
496
438 for arg, meta in entry.args.items():
497 for arg, meta in entry.args.items():
439 args[arg] = {
498 args[arg] = {
440 # TODO should this be a normalized type using CBOR's
499 # TODO should this be a normalized type using CBOR's
441 # terminology?
500 # terminology?
442 b'type': meta['type'],
501 b'type': meta['type'],
443 b'required': meta['required'],
502 b'required': meta['required'],
444 }
503 }
445
504
446 if not meta['required']:
505 if not meta['required']:
447 args[arg][b'default'] = meta['default']()
506 args[arg][b'default'] = meta['default']()
448
507
449 if meta['validvalues']:
508 if meta['validvalues']:
450 args[arg][b'validvalues'] = meta['validvalues']
509 args[arg][b'validvalues'] = meta['validvalues']
451
510
452 caps['commands'][command] = {
511 caps['commands'][command] = {
453 'args': args,
512 'args': args,
454 'permissions': [entry.permission],
513 'permissions': [entry.permission],
455 }
514 }
456
515
457 if streamclone.allowservergeneration(repo):
516 if streamclone.allowservergeneration(repo):
458 caps['rawrepoformats'] = sorted(repo.requirements &
517 caps['rawrepoformats'] = sorted(repo.requirements &
459 repo.supportedformats)
518 repo.supportedformats)
460
519
461 return proto.addcapabilities(repo, caps)
520 return proto.addcapabilities(repo, caps)
462
521
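For orientation, the descriptor assembled above ends up shaped roughly like the following; the values are illustrative only, since the real content depends on the repository, its requirements, and the configured compression engines.

    {
        'commands': {
            'manifestdata': {
                'args': {
                    'tree': {b'type': 'bytes', b'required': True},
                    # ...remaining arguments elided...
                },
                'permissions': ['pull'],
            },
            # ...one entry per registered command...
        },
        'compression': [{b'name': 'zstd'}, {b'name': 'zlib'}],
        'framingmediatypes': [FRAMINGTYPE],
        'pathfilterprefixes': set(narrowspec.VALID_PREFIXES),
        'rawrepoformats': ['generaldelta', 'revlogv1'],
    }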
463 def wireprotocommand(name, args=None, permission='push'):
522 def wireprotocommand(name, args=None, permission='push', cachekeyfn=None):
464 """Decorator to declare a wire protocol command.
523 """Decorator to declare a wire protocol command.
465
524
466 ``name`` is the name of the wire protocol command being provided.
525 ``name`` is the name of the wire protocol command being provided.
467
526
468 ``args`` is a dict defining arguments accepted by the command. Keys are
527 ``args`` is a dict defining arguments accepted by the command. Keys are
469 the argument name. Values are dicts with the following keys:
528 the argument name. Values are dicts with the following keys:
470
529
471 ``type``
530 ``type``
472 The argument data type. Must be one of the following string
531 The argument data type. Must be one of the following string
473 literals: ``bytes``, ``int``, ``list``, ``dict``, ``set``,
532 literals: ``bytes``, ``int``, ``list``, ``dict``, ``set``,
474 or ``bool``.
533 or ``bool``.
475
534
476 ``default``
535 ``default``
477 A callable returning the default value for this argument. If not
536 A callable returning the default value for this argument. If not
478 specified, ``None`` will be the default value.
537 specified, ``None`` will be the default value.
479
538
480 ``example``
539 ``example``
481 An example value for this argument.
540 An example value for this argument.
482
541
483 ``validvalues``
542 ``validvalues``
484 Set of recognized values for this argument.
543 Set of recognized values for this argument.
485
544
486 ``permission`` defines the permission type needed to run this command.
545 ``permission`` defines the permission type needed to run this command.
487 Can be ``push`` or ``pull``. These roughly map to read-write and read-only,
546 Can be ``push`` or ``pull``. These roughly map to read-write and read-only,
488 respectively. Default is to assume command requires ``push`` permissions
547 respectively. Default is to assume command requires ``push`` permissions
489 because otherwise commands not declaring their permissions could modify
548 because otherwise commands not declaring their permissions could modify
490 a repository that is supposed to be read-only.
549 a repository that is supposed to be read-only.
491
550
551 ``cachekeyfn`` defines an optional callable that can derive the
552 cache key for this request.
553
492 Wire protocol commands are generators of objects to be serialized and
554 Wire protocol commands are generators of objects to be serialized and
493 sent to the client.
555 sent to the client.
494
556
495 If a command raises an uncaught exception, this will be translated into
557 If a command raises an uncaught exception, this will be translated into
496 a command error.
558 a command error.
559
560 All commands can opt in to being cacheable by defining a function
561 (``cachekeyfn``) that is called to derive a cache key. This function
562 receives the same arguments as the command itself plus a ``cacher``
563 argument containing the active cacher for the request and returns bytes
564 to use as the key under which the response to this command may be
565 cached.
497 """
566 """
498 transports = {k for k, v in wireprototypes.TRANSPORTS.items()
567 transports = {k for k, v in wireprototypes.TRANSPORTS.items()
499 if v['version'] == 2}
568 if v['version'] == 2}
500
569
501 if permission not in ('push', 'pull'):
570 if permission not in ('push', 'pull'):
502 raise error.ProgrammingError('invalid wire protocol permission; '
571 raise error.ProgrammingError('invalid wire protocol permission; '
503 'got %s; expected "push" or "pull"' %
572 'got %s; expected "push" or "pull"' %
504 permission)
573 permission)
505
574
506 if args is None:
575 if args is None:
507 args = {}
576 args = {}
508
577
509 if not isinstance(args, dict):
578 if not isinstance(args, dict):
510 raise error.ProgrammingError('arguments for version 2 commands '
579 raise error.ProgrammingError('arguments for version 2 commands '
511 'must be declared as dicts')
580 'must be declared as dicts')
512
581
513 for arg, meta in args.items():
582 for arg, meta in args.items():
514 if arg == '*':
583 if arg == '*':
515 raise error.ProgrammingError('* argument name not allowed on '
584 raise error.ProgrammingError('* argument name not allowed on '
516 'version 2 commands')
585 'version 2 commands')
517
586
518 if not isinstance(meta, dict):
587 if not isinstance(meta, dict):
519 raise error.ProgrammingError('arguments for version 2 commands '
588 raise error.ProgrammingError('arguments for version 2 commands '
520 'must declare metadata as a dict')
589 'must declare metadata as a dict')
521
590
522 if 'type' not in meta:
591 if 'type' not in meta:
523 raise error.ProgrammingError('%s argument for command %s does not '
592 raise error.ProgrammingError('%s argument for command %s does not '
524 'declare type field' % (arg, name))
593 'declare type field' % (arg, name))
525
594
526 if meta['type'] not in ('bytes', 'int', 'list', 'dict', 'set', 'bool'):
595 if meta['type'] not in ('bytes', 'int', 'list', 'dict', 'set', 'bool'):
527 raise error.ProgrammingError('%s argument for command %s has '
596 raise error.ProgrammingError('%s argument for command %s has '
528 'illegal type: %s' % (arg, name,
597 'illegal type: %s' % (arg, name,
529 meta['type']))
598 meta['type']))
530
599
531 if 'example' not in meta:
600 if 'example' not in meta:
532 raise error.ProgrammingError('%s argument for command %s does not '
601 raise error.ProgrammingError('%s argument for command %s does not '
533 'declare example field' % (arg, name))
602 'declare example field' % (arg, name))
534
603
535 meta['required'] = 'default' not in meta
604 meta['required'] = 'default' not in meta
536
605
537 meta.setdefault('default', lambda: None)
606 meta.setdefault('default', lambda: None)
538 meta.setdefault('validvalues', None)
607 meta.setdefault('validvalues', None)
539
608
540 def register(func):
609 def register(func):
541 if name in COMMANDS:
610 if name in COMMANDS:
542 raise error.ProgrammingError('%s command already registered '
611 raise error.ProgrammingError('%s command already registered '
543 'for version 2' % name)
612 'for version 2' % name)
544
613
545 COMMANDS[name] = wireprototypes.commandentry(
614 COMMANDS[name] = wireprototypes.commandentry(
546 func, args=args, transports=transports, permission=permission)
615 func, args=args, transports=transports, permission=permission,
616 cachekeyfn=cachekeyfn)
547
617
548 return func
618 return func
549
619
550 return register
620 return register
551
621
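A standalone sketch of a registration using these arguments; the command name, its argument, and the cache key function call are hypothetical and only illustrate the documented fields (makecommandcachekeyfn, defined just below, must already exist when such a registration runs).

    @wireprotocommand(
        'pingdata',                     # hypothetical command name
        args={
            'count': {
                'type': 'int',
                'default': lambda: 1,
                'example': 3,
            },
        },
        permission='pull',
        cachekeyfn=makecommandcachekeyfn('pingdata', 1, allargs=True))
    def pingdata(repo, proto, count):
        # Commands are generators of objects to be serialized to the client.
        for i in range(count):
            yield {b'ping': i}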
622 def makecommandcachekeyfn(command, localversion=None, allargs=False):
623 """Construct a cache key derivation function with common features.
624
625 By default, the cache key is a hash of:
626
627 * The command name.
628 * A global cache version number.
629 * A local cache version number (passed via ``localversion``).
630 * All the arguments passed to the command.
631 * The media type used.
632 * Wire protocol version string.
633 * The repository path.
634 """
635 if not allargs:
636 raise error.ProgrammingError('only allargs=True is currently supported')
637
638 if localversion is None:
639 raise error.ProgrammingError('must set localversion argument value')
640
641 def cachekeyfn(repo, proto, cacher, **args):
642 spec = COMMANDS[command]
643
644 # Commands that mutate the repo can not be cached.
645 if spec.permission == 'push':
646 return None
647
648 # TODO config option to disable caching.
649
650 # Our key derivation strategy is to construct a data structure
651 # holding everything that could influence cacheability and to hash
652 # the CBOR representation of that. Using CBOR seems like it might
653 # be overkill. However, simpler hashing mechanisms are prone to
654 # duplicate input issues. e.g. if you just concatenate two values,
655 # "foo"+"bar" is identical to "fo"+"obar". Using CBOR provides
656 # "padding" between values and prevents these problems.
657
658 # Seed the hash with various data.
659 state = {
660 # To invalidate all cache keys.
661 b'globalversion': GLOBAL_CACHE_VERSION,
662 # More granular cache key invalidation.
663 b'localversion': localversion,
664 # Cache keys are segmented by command.
665 b'command': pycompat.sysbytes(command),
666 # Throw in the media type and API version strings so changes
667 # to exchange semantics invalidate the cache.
668 b'mediatype': FRAMINGTYPE,
669 b'version': HTTP_WIREPROTO_V2,
670 # So same requests for different repos don't share cache keys.
671 b'repo': repo.root,
672 }
673
674 # The arguments passed to us will have already been normalized.
675 # Default values will be set, etc. This is important because it
676 # means that it doesn't matter if clients send an explicit argument
677 # or rely on the default value: it will all normalize to the same
678 # set of arguments on the server and therefore the same cache key.
679 #
680 # Arguments by their very nature must support being encoded to CBOR.
681 # And the CBOR encoder is deterministic. So we hash the arguments
682 # by feeding the CBOR of their representation into the hasher.
683 if allargs:
684 state[b'args'] = pycompat.byteskwargs(args)
685
686 cacher.adjustcachekeystate(state)
687
688 hasher = hashlib.sha1()
689 for chunk in cborutil.streamencode(state):
690 hasher.update(chunk)
691
692 return pycompat.sysbytes(hasher.hexdigest())
693
694 return cachekeyfn
695
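The duplicate-input concern called out in the comment above is easy to demonstrate: hashing naive concatenation collides, while hashing the CBOR stream of the same values does not. A small sketch, assuming cborutil is importable from mercurial.utils as the code in this module suggests:

    import hashlib

    from mercurial.utils import cborutil

    def digest(value):
        h = hashlib.sha1()
        for chunk in cborutil.streamencode(value):
            h.update(chunk)
        return h.hexdigest()

    # Plain concatenation cannot tell these inputs apart...
    assert hashlib.sha1(b'foo' + b'bar').digest() == hashlib.sha1(b'fo' + b'obar').digest()

    # ...whereas the CBOR encoding keeps the value boundaries, so the keys differ.
    assert digest([b'foo', b'bar']) != digest([b'fo', b'obar'])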
696 def makeresponsecacher(repo, proto, command, args, objencoderfn):
697 """Construct a cacher for a cacheable command.
698
699 Returns an ``iwireprotocolcommandcacher`` instance.
700
701 Extensions can monkeypatch this function to provide custom caching
702 backends.
703 """
704 return None
705
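Since the default factory returns None, caching only happens when an extension substitutes its own backend. A hypothetical extension could wrap the factory roughly as follows, assuming this module is importable as mercurial.wireprotov2server; noopcommandcacher refers to the illustrative sketch earlier in this file and is not part of Mercurial.

    from mercurial import extensions, wireprotov2server

    def _makeresponsecacher(orig, repo, proto, command, args, objencoderfn):
        # Return a real caching backend here; orig(...) would restore the default.
        return noopcommandcacher()

    def extsetup(ui):
        extensions.wrapfunction(wireprotov2server, 'makeresponsecacher',
                                _makeresponsecacher)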
552 @wireprotocommand('branchmap', permission='pull')
706 @wireprotocommand('branchmap', permission='pull')
553 def branchmapv2(repo, proto):
707 def branchmapv2(repo, proto):
554 yield {encoding.fromlocal(k): v
708 yield {encoding.fromlocal(k): v
555 for k, v in repo.branchmap().iteritems()}
709 for k, v in repo.branchmap().iteritems()}
556
710
557 @wireprotocommand('capabilities', permission='pull')
711 @wireprotocommand('capabilities', permission='pull')
558 def capabilitiesv2(repo, proto):
712 def capabilitiesv2(repo, proto):
559 yield _capabilitiesv2(repo, proto)
713 yield _capabilitiesv2(repo, proto)
560
714
561 @wireprotocommand(
715 @wireprotocommand(
562 'changesetdata',
716 'changesetdata',
563 args={
717 args={
564 'noderange': {
718 'noderange': {
565 'type': 'list',
719 'type': 'list',
566 'default': lambda: None,
720 'default': lambda: None,
567 'example': [[b'0123456...'], [b'abcdef...']],
721 'example': [[b'0123456...'], [b'abcdef...']],
568 },
722 },
569 'nodes': {
723 'nodes': {
570 'type': 'list',
724 'type': 'list',
571 'default': lambda: None,
725 'default': lambda: None,
572 'example': [b'0123456...'],
726 'example': [b'0123456...'],
573 },
727 },
574 'nodesdepth': {
728 'nodesdepth': {
575 'type': 'int',
729 'type': 'int',
576 'default': lambda: None,
730 'default': lambda: None,
577 'example': 10,
731 'example': 10,
578 },
732 },
579 'fields': {
733 'fields': {
580 'type': 'set',
734 'type': 'set',
581 'default': set,
735 'default': set,
582 'example': {b'parents', b'revision'},
736 'example': {b'parents', b'revision'},
583 'validvalues': {b'bookmarks', b'parents', b'phase', b'revision'},
737 'validvalues': {b'bookmarks', b'parents', b'phase', b'revision'},
584 },
738 },
585 },
739 },
586 permission='pull')
740 permission='pull')
587 def changesetdata(repo, proto, noderange, nodes, nodesdepth, fields):
741 def changesetdata(repo, proto, noderange, nodes, nodesdepth, fields):
588 # TODO look for unknown fields and abort when they can't be serviced.
742 # TODO look for unknown fields and abort when they can't be serviced.
589 # This could probably be validated by dispatcher using validvalues.
743 # This could probably be validated by dispatcher using validvalues.
590
744
591 if noderange is None and nodes is None:
745 if noderange is None and nodes is None:
592 raise error.WireprotoCommandError(
746 raise error.WireprotoCommandError(
593 'noderange or nodes must be defined')
747 'noderange or nodes must be defined')
594
748
595 if nodesdepth is not None and nodes is None:
749 if nodesdepth is not None and nodes is None:
596 raise error.WireprotoCommandError(
750 raise error.WireprotoCommandError(
597 'nodesdepth requires the nodes argument')
751 'nodesdepth requires the nodes argument')
598
752
599 if noderange is not None:
753 if noderange is not None:
600 if len(noderange) != 2:
754 if len(noderange) != 2:
601 raise error.WireprotoCommandError(
755 raise error.WireprotoCommandError(
602 'noderange must consist of 2 elements')
756 'noderange must consist of 2 elements')
603
757
604 if not noderange[1]:
758 if not noderange[1]:
605 raise error.WireprotoCommandError(
759 raise error.WireprotoCommandError(
606 'heads in noderange request cannot be empty')
760 'heads in noderange request cannot be empty')
607
761
608 cl = repo.changelog
762 cl = repo.changelog
609 hasnode = cl.hasnode
763 hasnode = cl.hasnode
610
764
611 seen = set()
765 seen = set()
612 outgoing = []
766 outgoing = []
613
767
614 if nodes is not None:
768 if nodes is not None:
615 outgoing = [n for n in nodes if hasnode(n)]
769 outgoing = [n for n in nodes if hasnode(n)]
616
770
617 if nodesdepth:
771 if nodesdepth:
618 outgoing = [cl.node(r) for r in
772 outgoing = [cl.node(r) for r in
619 repo.revs(b'ancestors(%ln, %d)', outgoing,
773 repo.revs(b'ancestors(%ln, %d)', outgoing,
620 nodesdepth - 1)]
774 nodesdepth - 1)]
621
775
622 seen |= set(outgoing)
776 seen |= set(outgoing)
623
777
624 if noderange is not None:
778 if noderange is not None:
625 if noderange[0]:
779 if noderange[0]:
626 common = [n for n in noderange[0] if hasnode(n)]
780 common = [n for n in noderange[0] if hasnode(n)]
627 else:
781 else:
628 common = [nullid]
782 common = [nullid]
629
783
630 for n in discovery.outgoing(repo, common, noderange[1]).missing:
784 for n in discovery.outgoing(repo, common, noderange[1]).missing:
631 if n not in seen:
785 if n not in seen:
632 outgoing.append(n)
786 outgoing.append(n)
633 # Don't need to add to seen here because this is the final
787 # Don't need to add to seen here because this is the final
634 # source of nodes and there should be no duplicates in this
788 # source of nodes and there should be no duplicates in this
635 # list.
789 # list.
636
790
637 seen.clear()
791 seen.clear()
638 publishing = repo.publishing()
792 publishing = repo.publishing()
639
793
640 if outgoing:
794 if outgoing:
641 repo.hook('preoutgoing', throw=True, source='serve')
795 repo.hook('preoutgoing', throw=True, source='serve')
642
796
643 yield {
797 yield {
644 b'totalitems': len(outgoing),
798 b'totalitems': len(outgoing),
645 }
799 }
646
800
647 # The phases of nodes already transferred to the client may have changed
801 # The phases of nodes already transferred to the client may have changed
648 # since the client last requested data. We send phase-only records
802 # since the client last requested data. We send phase-only records
649 # for these revisions, if requested.
803 # for these revisions, if requested.
650 if b'phase' in fields and noderange is not None:
804 if b'phase' in fields and noderange is not None:
651 # TODO skip nodes whose phase will be reflected by a node in the
805 # TODO skip nodes whose phase will be reflected by a node in the
652 # outgoing set. This is purely an optimization to reduce data
806 # outgoing set. This is purely an optimization to reduce data
653 # size.
807 # size.
654 for node in noderange[0]:
808 for node in noderange[0]:
655 yield {
809 yield {
656 b'node': node,
810 b'node': node,
657 b'phase': b'public' if publishing else repo[node].phasestr()
811 b'phase': b'public' if publishing else repo[node].phasestr()
658 }
812 }
659
813
660 nodebookmarks = {}
814 nodebookmarks = {}
661 for mark, node in repo._bookmarks.items():
815 for mark, node in repo._bookmarks.items():
662 nodebookmarks.setdefault(node, set()).add(mark)
816 nodebookmarks.setdefault(node, set()).add(mark)
663
817
664 # It is already topologically sorted by revision number.
818 # It is already topologically sorted by revision number.
665 for node in outgoing:
819 for node in outgoing:
666 d = {
820 d = {
667 b'node': node,
821 b'node': node,
668 }
822 }
669
823
670 if b'parents' in fields:
824 if b'parents' in fields:
671 d[b'parents'] = cl.parents(node)
825 d[b'parents'] = cl.parents(node)
672
826
673 if b'phase' in fields:
827 if b'phase' in fields:
674 if publishing:
828 if publishing:
675 d[b'phase'] = b'public'
829 d[b'phase'] = b'public'
676 else:
830 else:
677 ctx = repo[node]
831 ctx = repo[node]
678 d[b'phase'] = ctx.phasestr()
832 d[b'phase'] = ctx.phasestr()
679
833
680 if b'bookmarks' in fields and node in nodebookmarks:
834 if b'bookmarks' in fields and node in nodebookmarks:
681 d[b'bookmarks'] = sorted(nodebookmarks[node])
835 d[b'bookmarks'] = sorted(nodebookmarks[node])
682 del nodebookmarks[node]
836 del nodebookmarks[node]
683
837
684 followingmeta = []
838 followingmeta = []
685 followingdata = []
839 followingdata = []
686
840
687 if b'revision' in fields:
841 if b'revision' in fields:
688 revisiondata = cl.revision(node, raw=True)
842 revisiondata = cl.revision(node, raw=True)
689 followingmeta.append((b'revision', len(revisiondata)))
843 followingmeta.append((b'revision', len(revisiondata)))
690 followingdata.append(revisiondata)
844 followingdata.append(revisiondata)
691
845
692 # TODO make it possible for extensions to wrap a function or register
846 # TODO make it possible for extensions to wrap a function or register
693 # a handler to service custom fields.
847 # a handler to service custom fields.
694
848
695 if followingmeta:
849 if followingmeta:
696 d[b'fieldsfollowing'] = followingmeta
850 d[b'fieldsfollowing'] = followingmeta
697
851
698 yield d
852 yield d
699
853
700 for extra in followingdata:
854 for extra in followingdata:
701 yield extra
855 yield extra
702
856
703 # If requested, send bookmarks from nodes that didn't have revision
857 # If requested, send bookmarks from nodes that didn't have revision
704 # data sent so receiver is aware of any bookmark updates.
858 # data sent so receiver is aware of any bookmark updates.
705 if b'bookmarks' in fields:
859 if b'bookmarks' in fields:
706 for node, marks in sorted(nodebookmarks.iteritems()):
860 for node, marks in sorted(nodebookmarks.iteritems()):
707 yield {
861 yield {
708 b'node': node,
862 b'node': node,
709 b'bookmarks': sorted(marks),
863 b'bookmarks': sorted(marks),
710 }
864 }
711
865
712 class FileAccessError(Exception):
866 class FileAccessError(Exception):
713 """Represents an error accessing a specific file."""
867 """Represents an error accessing a specific file."""
714
868
715 def __init__(self, path, msg, args):
869 def __init__(self, path, msg, args):
716 self.path = path
870 self.path = path
717 self.msg = msg
871 self.msg = msg
718 self.args = args
872 self.args = args
719
873
720 def getfilestore(repo, proto, path):
874 def getfilestore(repo, proto, path):
721 """Obtain a file storage object for use with wire protocol.
875 """Obtain a file storage object for use with wire protocol.
722
876
723 Exists as a standalone function so extensions can monkeypatch to add
877 Exists as a standalone function so extensions can monkeypatch to add
724 access control.
878 access control.
725 """
879 """
726 # This seems to work even if the file doesn't exist. So catch
880 # This seems to work even if the file doesn't exist. So catch
727 # "empty" files and return an error.
881 # "empty" files and return an error.
728 fl = repo.file(path)
882 fl = repo.file(path)
729
883
730 if not len(fl):
884 if not len(fl):
731 raise FileAccessError(path, 'unknown file: %s', (path,))
885 raise FileAccessError(path, 'unknown file: %s', (path,))
732
886
733 return fl
887 return fl
734
888
735 @wireprotocommand(
889 @wireprotocommand(
736 'filedata',
890 'filedata',
737 args={
891 args={
738 'haveparents': {
892 'haveparents': {
739 'type': 'bool',
893 'type': 'bool',
740 'default': lambda: False,
894 'default': lambda: False,
741 'example': True,
895 'example': True,
742 },
896 },
743 'nodes': {
897 'nodes': {
744 'type': 'list',
898 'type': 'list',
745 'example': [b'0123456...'],
899 'example': [b'0123456...'],
746 },
900 },
747 'fields': {
901 'fields': {
748 'type': 'set',
902 'type': 'set',
749 'default': set,
903 'default': set,
750 'example': {b'parents', b'revision'},
904 'example': {b'parents', b'revision'},
751 'validvalues': {b'parents', b'revision'},
905 'validvalues': {b'parents', b'revision'},
752 },
906 },
753 'path': {
907 'path': {
754 'type': 'bytes',
908 'type': 'bytes',
755 'example': b'foo.txt',
909 'example': b'foo.txt',
756 }
910 }
757 },
911 },
758 permission='pull')
912 permission='pull',
913 # TODO censoring a file revision won't invalidate the cache.
914 # Figure out a way to take censoring into account when deriving
915 # the cache key.
916 cachekeyfn=makecommandcachekeyfn('filedata', 1, allargs=True))
759 def filedata(repo, proto, haveparents, nodes, fields, path):
917 def filedata(repo, proto, haveparents, nodes, fields, path):
760 try:
918 try:
761 # Extensions may wish to access the protocol handler.
919 # Extensions may wish to access the protocol handler.
762 store = getfilestore(repo, proto, path)
920 store = getfilestore(repo, proto, path)
763 except FileAccessError as e:
921 except FileAccessError as e:
764 raise error.WireprotoCommandError(e.msg, e.args)
922 raise error.WireprotoCommandError(e.msg, e.args)
765
923
766 # Validate requested nodes.
924 # Validate requested nodes.
767 for node in nodes:
925 for node in nodes:
768 try:
926 try:
769 store.rev(node)
927 store.rev(node)
770 except error.LookupError:
928 except error.LookupError:
771 raise error.WireprotoCommandError('unknown file node: %s',
929 raise error.WireprotoCommandError('unknown file node: %s',
772 (hex(node),))
930 (hex(node),))
773
931
774 revisions = store.emitrevisions(nodes,
932 revisions = store.emitrevisions(nodes,
775 revisiondata=b'revision' in fields,
933 revisiondata=b'revision' in fields,
776 assumehaveparentrevisions=haveparents)
934 assumehaveparentrevisions=haveparents)
777
935
778 yield {
936 yield {
779 b'totalitems': len(nodes),
937 b'totalitems': len(nodes),
780 }
938 }
781
939
782 for revision in revisions:
940 for revision in revisions:
783 d = {
941 d = {
784 b'node': revision.node,
942 b'node': revision.node,
785 }
943 }
786
944
787 if b'parents' in fields:
945 if b'parents' in fields:
788 d[b'parents'] = [revision.p1node, revision.p2node]
946 d[b'parents'] = [revision.p1node, revision.p2node]
789
947
790 followingmeta = []
948 followingmeta = []
791 followingdata = []
949 followingdata = []
792
950
793 if b'revision' in fields:
951 if b'revision' in fields:
794 if revision.revision is not None:
952 if revision.revision is not None:
795 followingmeta.append((b'revision', len(revision.revision)))
953 followingmeta.append((b'revision', len(revision.revision)))
796 followingdata.append(revision.revision)
954 followingdata.append(revision.revision)
797 else:
955 else:
798 d[b'deltabasenode'] = revision.basenode
956 d[b'deltabasenode'] = revision.basenode
799 followingmeta.append((b'delta', len(revision.delta)))
957 followingmeta.append((b'delta', len(revision.delta)))
800 followingdata.append(revision.delta)
958 followingdata.append(revision.delta)
801
959
802 if followingmeta:
960 if followingmeta:
803 d[b'fieldsfollowing'] = followingmeta
961 d[b'fieldsfollowing'] = followingmeta
804
962
805 yield d
963 yield d
806
964
807 for extra in followingdata:
965 for extra in followingdata:
808 yield extra
966 yield extra
809
967
810 @wireprotocommand(
968 @wireprotocommand(
811 'heads',
969 'heads',
812 args={
970 args={
813 'publiconly': {
971 'publiconly': {
814 'type': 'bool',
972 'type': 'bool',
815 'default': lambda: False,
973 'default': lambda: False,
816 'example': False,
974 'example': False,
817 },
975 },
818 },
976 },
819 permission='pull')
977 permission='pull')
820 def headsv2(repo, proto, publiconly):
978 def headsv2(repo, proto, publiconly):
821 if publiconly:
979 if publiconly:
822 repo = repo.filtered('immutable')
980 repo = repo.filtered('immutable')
823
981
824 yield repo.heads()
982 yield repo.heads()
825
983
826 @wireprotocommand(
984 @wireprotocommand(
827 'known',
985 'known',
828 args={
986 args={
829 'nodes': {
987 'nodes': {
830 'type': 'list',
988 'type': 'list',
831 'default': list,
989 'default': list,
832 'example': [b'deadbeef'],
990 'example': [b'deadbeef'],
833 },
991 },
834 },
992 },
835 permission='pull')
993 permission='pull')
836 def knownv2(repo, proto, nodes):
994 def knownv2(repo, proto, nodes):
837 result = b''.join(b'1' if n else b'0' for n in repo.known(nodes))
995 result = b''.join(b'1' if n else b'0' for n in repo.known(nodes))
838 yield result
996 yield result
839
997
840 @wireprotocommand(
998 @wireprotocommand(
841 'listkeys',
999 'listkeys',
842 args={
1000 args={
843 'namespace': {
1001 'namespace': {
844 'type': 'bytes',
1002 'type': 'bytes',
845 'example': b'ns',
1003 'example': b'ns',
846 },
1004 },
847 },
1005 },
848 permission='pull')
1006 permission='pull')
849 def listkeysv2(repo, proto, namespace):
1007 def listkeysv2(repo, proto, namespace):
850 keys = repo.listkeys(encoding.tolocal(namespace))
1008 keys = repo.listkeys(encoding.tolocal(namespace))
851 keys = {encoding.fromlocal(k): encoding.fromlocal(v)
1009 keys = {encoding.fromlocal(k): encoding.fromlocal(v)
852 for k, v in keys.iteritems()}
1010 for k, v in keys.iteritems()}
853
1011
854 yield keys
1012 yield keys
855
1013
856 @wireprotocommand(
1014 @wireprotocommand(
857 'lookup',
1015 'lookup',
858 args={
1016 args={
859 'key': {
1017 'key': {
860 'type': 'bytes',
1018 'type': 'bytes',
861 'example': b'foo',
1019 'example': b'foo',
862 },
1020 },
863 },
1021 },
864 permission='pull')
1022 permission='pull')
865 def lookupv2(repo, proto, key):
1023 def lookupv2(repo, proto, key):
866 key = encoding.tolocal(key)
1024 key = encoding.tolocal(key)
867
1025
868 # TODO handle exception.
1026 # TODO handle exception.
869 node = repo.lookup(key)
1027 node = repo.lookup(key)
870
1028
871 yield node
1029 yield node
872
1030
873 @wireprotocommand(
1031 @wireprotocommand(
874 'manifestdata',
1032 'manifestdata',
875 args={
1033 args={
876 'nodes': {
1034 'nodes': {
877 'type': 'list',
1035 'type': 'list',
878 'example': [b'0123456...'],
1036 'example': [b'0123456...'],
879 },
1037 },
880 'haveparents': {
1038 'haveparents': {
881 'type': 'bool',
1039 'type': 'bool',
882 'default': lambda: False,
1040 'default': lambda: False,
883 'example': True,
1041 'example': True,
884 },
1042 },
885 'fields': {
1043 'fields': {
886 'type': 'set',
1044 'type': 'set',
887 'default': set,
1045 'default': set,
888 'example': {b'parents', b'revision'},
1046 'example': {b'parents', b'revision'},
889 'validvalues': {b'parents', b'revision'},
1047 'validvalues': {b'parents', b'revision'},
890 },
1048 },
891 'tree': {
1049 'tree': {
892 'type': 'bytes',
1050 'type': 'bytes',
893 'example': b'',
1051 'example': b'',
894 },
1052 },
895 },
1053 },
896 permission='pull')
1054 permission='pull',
1055 cachekeyfn=makecommandcachekeyfn('manifestdata', 1, allargs=True))
897 def manifestdata(repo, proto, haveparents, nodes, fields, tree):
1056 def manifestdata(repo, proto, haveparents, nodes, fields, tree):
898 store = repo.manifestlog.getstorage(tree)
1057 store = repo.manifestlog.getstorage(tree)
899
1058
900 # Validate the node is known and abort on unknown revisions.
1059 # Validate the node is known and abort on unknown revisions.
901 for node in nodes:
1060 for node in nodes:
902 try:
1061 try:
903 store.rev(node)
1062 store.rev(node)
904 except error.LookupError:
1063 except error.LookupError:
905 raise error.WireprotoCommandError(
1064 raise error.WireprotoCommandError(
906 'unknown node: %s', (node,))
1065 'unknown node: %s', (node,))
907
1066
908 revisions = store.emitrevisions(nodes,
1067 revisions = store.emitrevisions(nodes,
909 revisiondata=b'revision' in fields,
1068 revisiondata=b'revision' in fields,
910 assumehaveparentrevisions=haveparents)
1069 assumehaveparentrevisions=haveparents)
911
1070
912 yield {
1071 yield {
913 b'totalitems': len(nodes),
1072 b'totalitems': len(nodes),
914 }
1073 }
915
1074
916 for revision in revisions:
1075 for revision in revisions:
917 d = {
1076 d = {
918 b'node': revision.node,
1077 b'node': revision.node,
919 }
1078 }
920
1079
921 if b'parents' in fields:
1080 if b'parents' in fields:
922 d[b'parents'] = [revision.p1node, revision.p2node]
1081 d[b'parents'] = [revision.p1node, revision.p2node]
923
1082
924 followingmeta = []
1083 followingmeta = []
925 followingdata = []
1084 followingdata = []
926
1085
927 if b'revision' in fields:
1086 if b'revision' in fields:
928 if revision.revision is not None:
1087 if revision.revision is not None:
929 followingmeta.append((b'revision', len(revision.revision)))
1088 followingmeta.append((b'revision', len(revision.revision)))
930 followingdata.append(revision.revision)
1089 followingdata.append(revision.revision)
931 else:
1090 else:
932 d[b'deltabasenode'] = revision.basenode
1091 d[b'deltabasenode'] = revision.basenode
933 followingmeta.append((b'delta', len(revision.delta)))
1092 followingmeta.append((b'delta', len(revision.delta)))
934 followingdata.append(revision.delta)
1093 followingdata.append(revision.delta)
935
1094
936 if followingmeta:
1095 if followingmeta:
937 d[b'fieldsfollowing'] = followingmeta
1096 d[b'fieldsfollowing'] = followingmeta
938
1097
939 yield d
1098 yield d
940
1099
941 for extra in followingdata:
1100 for extra in followingdata:
942 yield extra
1101 yield extra
943
1102
944 @wireprotocommand(
1103 @wireprotocommand(
945 'pushkey',
1104 'pushkey',
946 args={
1105 args={
947 'namespace': {
1106 'namespace': {
948 'type': 'bytes',
1107 'type': 'bytes',
949 'example': b'ns',
1108 'example': b'ns',
950 },
1109 },
951 'key': {
1110 'key': {
952 'type': 'bytes',
1111 'type': 'bytes',
953 'example': b'key',
1112 'example': b'key',
954 },
1113 },
955 'old': {
1114 'old': {
956 'type': 'bytes',
1115 'type': 'bytes',
957 'example': b'old',
1116 'example': b'old',
958 },
1117 },
959 'new': {
1118 'new': {
960 'type': 'bytes',
1119 'type': 'bytes',
961 'example': b'new',
1120 'example': b'new',
962 },
1121 },
963 },
1122 },
964 permission='push')
1123 permission='push')
965 def pushkeyv2(repo, proto, namespace, key, old, new):
1124 def pushkeyv2(repo, proto, namespace, key, old, new):
966 # TODO handle ui output redirection
1125 # TODO handle ui output redirection
967 yield repo.pushkey(encoding.tolocal(namespace),
1126 yield repo.pushkey(encoding.tolocal(namespace),
968 encoding.tolocal(key),
1127 encoding.tolocal(key),
969 encoding.tolocal(old),
1128 encoding.tolocal(old),
970 encoding.tolocal(new))
1129 encoding.tolocal(new))