##// END OF EJS Templates
wireprotov2: define and implement "changesetdata" command...
Gregory Szorc -
r39666:9c2c77c7 default
parent child Browse files
Show More
@@ -0,0 +1,486
1 $ . $TESTDIR/wireprotohelpers.sh
2
3 $ hg init server
4 $ enablehttpv2 server
5 $ cd server
6 $ echo a0 > a
7 $ echo b0 > b
8
9 $ hg -q commit -A -m 'commit 0'
10
11 $ echo a1 > a
12 $ echo b1 > b
13 $ hg commit -m 'commit 1'
14 $ echo b2 > b
15 $ hg commit -m 'commit 2'
16
17 $ hg -q up -r 0
18 $ echo a2 > a
19 $ hg commit -m 'commit 3'
20 created new head
21
22 $ hg log -G -T '{rev}:{node} {desc}\n'
23 @ 3:eae5f82c2e622368d27daecb76b7e393d0f24211 commit 3
24 |
25 | o 2:0bb8ad894a15b15380b2a2a5b183e20f2a4b28dd commit 2
26 | |
27 | o 1:7592917e1c3e82677cb0a4bc715ca25dd12d28c1 commit 1
28 |/
29 o 0:3390ef850073fbc2f0dfff2244342c8e9229013a commit 0
30
31
32 $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log
33 $ cat hg.pid > $DAEMON_PIDS
34
35 No arguments is an invalid request
36
37 $ sendhttpv2peer << EOF
38 > command changesetdata
39 > EOF
40 creating http peer for wire protocol version 2
41 sending changesetdata command
42 s> POST /api/exp-http-v2-0001/ro/changesetdata HTTP/1.1\r\n
43 s> Accept-Encoding: identity\r\n
44 s> accept: application/mercurial-exp-framing-0005\r\n
45 s> content-type: application/mercurial-exp-framing-0005\r\n
46 s> content-length: 28\r\n
47 s> host: $LOCALIP:$HGPORT\r\n (glob)
48 s> user-agent: Mercurial debugwireproto\r\n
49 s> \r\n
50 s> \x14\x00\x00\x01\x00\x01\x01\x11\xa1DnameMchangesetdata
51 s> makefile('rb', None)
52 s> HTTP/1.1 200 OK\r\n
53 s> Server: testing stub value\r\n
54 s> Date: $HTTP_DATE$\r\n
55 s> Content-Type: application/mercurial-exp-framing-0005\r\n
56 s> Transfer-Encoding: chunked\r\n
57 s> \r\n
58 s> 49\r\n
59 s> A\x00\x00\x01\x00\x02\x012
60 s> \xa2Eerror\xa1GmessageX"noderange or nodes must be definedFstatusEerror
61 s> \r\n
62 received frame(size=65; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=eos)
63 s> 0\r\n
64 s> \r\n
65 abort: noderange or nodes must be defined!
66 [255]
67
68 Empty noderange heads results in an error
69
70 $ sendhttpv2peer << EOF
71 > command changesetdata
72 > noderange eval:[[],[]]
73 > EOF
74 creating http peer for wire protocol version 2
75 sending changesetdata command
76 s> POST /api/exp-http-v2-0001/ro/changesetdata HTTP/1.1\r\n
77 s> Accept-Encoding: identity\r\n
78 s> accept: application/mercurial-exp-framing-0005\r\n
79 s> content-type: application/mercurial-exp-framing-0005\r\n
80 s> content-length: 47\r\n
81 s> host: $LOCALIP:$HGPORT\r\n (glob)
82 s> user-agent: Mercurial debugwireproto\r\n
83 s> \r\n
84 s> \'\x00\x00\x01\x00\x01\x01\x11\xa2Dargs\xa1Inoderange\x82\x80\x80DnameMchangesetdata
85 s> makefile('rb', None)
86 s> HTTP/1.1 200 OK\r\n
87 s> Server: testing stub value\r\n
88 s> Date: $HTTP_DATE$\r\n
89 s> Content-Type: application/mercurial-exp-framing-0005\r\n
90 s> Transfer-Encoding: chunked\r\n
91 s> \r\n
92 s> 51\r\n
93 s> I\x00\x00\x01\x00\x02\x012
94 s> \xa2Eerror\xa1GmessageX*heads in noderange request cannot be emptyFstatusEerror
95 s> \r\n
96 received frame(size=73; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=eos)
97 s> 0\r\n
98 s> \r\n
99 abort: heads in noderange request cannot be empty!
100 [255]
101
102 Sending just noderange heads sends all revisions
103
104 $ sendhttpv2peer << EOF
105 > command changesetdata
106 > noderange eval:[[], [b'\x0b\xb8\xad\x89\x4a\x15\xb1\x53\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f\x2a\x4b\x28\xdd', b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11']]
107 > EOF
108 creating http peer for wire protocol version 2
109 sending changesetdata command
110 s> POST /api/exp-http-v2-0001/ro/changesetdata HTTP/1.1\r\n
111 s> Accept-Encoding: identity\r\n
112 s> accept: application/mercurial-exp-framing-0005\r\n
113 s> content-type: application/mercurial-exp-framing-0005\r\n
114 s> content-length: 89\r\n
115 s> host: $LOCALIP:$HGPORT\r\n (glob)
116 s> user-agent: Mercurial debugwireproto\r\n
117 s> \r\n
118 s> Q\x00\x00\x01\x00\x01\x01\x11\xa2Dargs\xa1Inoderange\x82\x80\x82T\x0b\xb8\xad\x89J\x15\xb1S\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f*K(\xddT\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11DnameMchangesetdata
119 s> makefile('rb', None)
120 s> HTTP/1.1 200 OK\r\n
121 s> Server: testing stub value\r\n
122 s> Date: $HTTP_DATE$\r\n
123 s> Content-Type: application/mercurial-exp-framing-0005\r\n
124 s> Transfer-Encoding: chunked\r\n
125 s> \r\n
126 s> 13\r\n
127 s> \x0b\x00\x00\x01\x00\x02\x011
128 s> \xa1FstatusBok
129 s> \r\n
130 received frame(size=11; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=continuation)
131 s> 81\r\n
132 s> y\x00\x00\x01\x00\x02\x001
133 s> \xa1Jtotalitems\x04\xa1DnodeT3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:\xa1DnodeTu\x92\x91~\x1c>\x82g|\xb0\xa4\xbcq\\\xa2]\xd1-(\xc1\xa1DnodeT\x0b\xb8\xad\x89J\x15\xb1S\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f*K(\xdd\xa1DnodeT\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11
134 s> \r\n
135 received frame(size=121; request=1; stream=2; streamflags=; type=command-response; flags=continuation)
136 s> 8\r\n
137 s> \x00\x00\x00\x01\x00\x02\x002
138 s> \r\n
139 s> 0\r\n
140 s> \r\n
141 received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
142 response: gen[
143 {
144 b'totalitems': 4
145 },
146 {
147 b'node': b'3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:'
148 },
149 {
150 b'node': b'u\x92\x91~\x1c>\x82g|\xb0\xa4\xbcq\\\xa2]\xd1-(\xc1'
151 },
152 {
153 b'node': b'\x0b\xb8\xad\x89J\x15\xb1S\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f*K(\xdd'
154 },
155 {
156 b'node': b'\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11'
157 }
158 ]
159
160 Sending root nodes limits what data is sent
161
162 $ sendhttpv2peer << EOF
163 > command changesetdata
164 > noderange eval:[[b'\x33\x90\xef\x85\x00\x73\xfb\xc2\xf0\xdf\xff\x22\x44\x34\x2c\x8e\x92\x29\x01\x3a'], [b'\x0b\xb8\xad\x89\x4a\x15\xb1\x53\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f\x2a\x4b\x28\xdd']]
165 > EOF
166 creating http peer for wire protocol version 2
167 sending changesetdata command
168 s> POST /api/exp-http-v2-0001/ro/changesetdata HTTP/1.1\r\n
169 s> Accept-Encoding: identity\r\n
170 s> accept: application/mercurial-exp-framing-0005\r\n
171 s> content-type: application/mercurial-exp-framing-0005\r\n
172 s> content-length: 89\r\n
173 s> host: $LOCALIP:$HGPORT\r\n (glob)
174 s> user-agent: Mercurial debugwireproto\r\n
175 s> \r\n
176 s> Q\x00\x00\x01\x00\x01\x01\x11\xa2Dargs\xa1Inoderange\x82\x81T3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:\x81T\x0b\xb8\xad\x89J\x15\xb1S\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f*K(\xddDnameMchangesetdata
177 s> makefile('rb', None)
178 s> HTTP/1.1 200 OK\r\n
179 s> Server: testing stub value\r\n
180 s> Date: $HTTP_DATE$\r\n
181 s> Content-Type: application/mercurial-exp-framing-0005\r\n
182 s> Transfer-Encoding: chunked\r\n
183 s> \r\n
184 s> 13\r\n
185 s> \x0b\x00\x00\x01\x00\x02\x011
186 s> \xa1FstatusBok
187 s> \r\n
188 received frame(size=11; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=continuation)
189 s> 4b\r\n
190 s> C\x00\x00\x01\x00\x02\x001
191 s> \xa1Jtotalitems\x02\xa1DnodeTu\x92\x91~\x1c>\x82g|\xb0\xa4\xbcq\\\xa2]\xd1-(\xc1\xa1DnodeT\x0b\xb8\xad\x89J\x15\xb1S\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f*K(\xdd
192 s> \r\n
193 received frame(size=67; request=1; stream=2; streamflags=; type=command-response; flags=continuation)
194 s> 8\r\n
195 s> \x00\x00\x00\x01\x00\x02\x002
196 s> \r\n
197 s> 0\r\n
198 s> \r\n
199 received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
200 response: gen[
201 {
202 b'totalitems': 2
203 },
204 {
205 b'node': b'u\x92\x91~\x1c>\x82g|\xb0\xa4\xbcq\\\xa2]\xd1-(\xc1'
206 },
207 {
208 b'node': b'\x0b\xb8\xad\x89J\x15\xb1S\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f*K(\xdd'
209 }
210 ]
211
212 Requesting data on a single node by node works
213
214 $ sendhttpv2peer << EOF
215 > command changesetdata
216 > nodes eval:[b'\x33\x90\xef\x85\x00\x73\xfb\xc2\xf0\xdf\xff\x22\x44\x34\x2c\x8e\x92\x29\x01\x3a']
217 > EOF
218 creating http peer for wire protocol version 2
219 sending changesetdata command
220 s> POST /api/exp-http-v2-0001/ro/changesetdata HTTP/1.1\r\n
221 s> Accept-Encoding: identity\r\n
222 s> accept: application/mercurial-exp-framing-0005\r\n
223 s> content-type: application/mercurial-exp-framing-0005\r\n
224 s> content-length: 62\r\n
225 s> host: $LOCALIP:$HGPORT\r\n (glob)
226 s> user-agent: Mercurial debugwireproto\r\n
227 s> \r\n
228 s> 6\x00\x00\x01\x00\x01\x01\x11\xa2Dargs\xa1Enodes\x81T3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:DnameMchangesetdata
229 s> makefile('rb', None)
230 s> HTTP/1.1 200 OK\r\n
231 s> Server: testing stub value\r\n
232 s> Date: $HTTP_DATE$\r\n
233 s> Content-Type: application/mercurial-exp-framing-0005\r\n
234 s> Transfer-Encoding: chunked\r\n
235 s> \r\n
236 s> 13\r\n
237 s> \x0b\x00\x00\x01\x00\x02\x011
238 s> \xa1FstatusBok
239 s> \r\n
240 received frame(size=11; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=continuation)
241 s> 30\r\n
242 s> (\x00\x00\x01\x00\x02\x001
243 s> \xa1Jtotalitems\x01\xa1DnodeT3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:
244 s> \r\n
245 received frame(size=40; request=1; stream=2; streamflags=; type=command-response; flags=continuation)
246 s> 8\r\n
247 s> \x00\x00\x00\x01\x00\x02\x002
248 s> \r\n
249 s> 0\r\n
250 s> \r\n
251 received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
252 response: gen[
253 {
254 b'totalitems': 1
255 },
256 {
257 b'node': b'3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:'
258 }
259 ]
260
261 Specifying a noderange and nodes takes union
262
263 $ sendhttpv2peer << EOF
264 > command changesetdata
265 > noderange eval:[[b'\x75\x92\x91\x7e\x1c\x3e\x82\x67\x7c\xb0\xa4\xbc\x71\x5c\xa2\x5d\xd1\x2d\x28\xc1'], [b'\x0b\xb8\xad\x89\x4a\x15\xb1\x53\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f\x2a\x4b\x28\xdd']]
266 > nodes eval:[b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11']
267 > EOF
268 creating http peer for wire protocol version 2
269 sending changesetdata command
270 s> POST /api/exp-http-v2-0001/ro/changesetdata HTTP/1.1\r\n
271 s> Accept-Encoding: identity\r\n
272 s> accept: application/mercurial-exp-framing-0005\r\n
273 s> content-type: application/mercurial-exp-framing-0005\r\n
274 s> content-length: 117\r\n
275 s> host: $LOCALIP:$HGPORT\r\n (glob)
276 s> user-agent: Mercurial debugwireproto\r\n
277 s> \r\n
278 s> m\x00\x00\x01\x00\x01\x01\x11\xa2Dargs\xa2Inoderange\x82\x81Tu\x92\x91~\x1c>\x82g|\xb0\xa4\xbcq\\\xa2]\xd1-(\xc1\x81T\x0b\xb8\xad\x89J\x15\xb1S\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f*K(\xddEnodes\x81T\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11DnameMchangesetdata
279 s> makefile('rb', None)
280 s> HTTP/1.1 200 OK\r\n
281 s> Server: testing stub value\r\n
282 s> Date: $HTTP_DATE$\r\n
283 s> Content-Type: application/mercurial-exp-framing-0005\r\n
284 s> Transfer-Encoding: chunked\r\n
285 s> \r\n
286 s> 13\r\n
287 s> \x0b\x00\x00\x01\x00\x02\x011
288 s> \xa1FstatusBok
289 s> \r\n
290 received frame(size=11; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=continuation)
291 s> 4b\r\n
292 s> C\x00\x00\x01\x00\x02\x001
293 s> \xa1Jtotalitems\x02\xa1DnodeT\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11\xa1DnodeT\x0b\xb8\xad\x89J\x15\xb1S\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f*K(\xdd
294 s> \r\n
295 received frame(size=67; request=1; stream=2; streamflags=; type=command-response; flags=continuation)
296 s> 8\r\n
297 s> \x00\x00\x00\x01\x00\x02\x002
298 s> \r\n
299 s> 0\r\n
300 s> \r\n
301 received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
302 response: gen[
303 {
304 b'totalitems': 2
305 },
306 {
307 b'node': b'\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11'
308 },
309 {
310 b'node': b'\x0b\xb8\xad\x89J\x15\xb1S\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f*K(\xdd'
311 }
312 ]
313
314 Parents data is transferred upon request
315
316 $ sendhttpv2peer << EOF
317 > command changesetdata
318 > fields eval:[b'parents']
319 > nodes eval:[b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11']
320 > EOF
321 creating http peer for wire protocol version 2
322 sending changesetdata command
323 s> POST /api/exp-http-v2-0001/ro/changesetdata HTTP/1.1\r\n
324 s> Accept-Encoding: identity\r\n
325 s> accept: application/mercurial-exp-framing-0005\r\n
326 s> content-type: application/mercurial-exp-framing-0005\r\n
327 s> content-length: 78\r\n
328 s> host: $LOCALIP:$HGPORT\r\n (glob)
329 s> user-agent: Mercurial debugwireproto\r\n
330 s> \r\n
331 s> F\x00\x00\x01\x00\x01\x01\x11\xa2Dargs\xa2Ffields\x81GparentsEnodes\x81T\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11DnameMchangesetdata
332 s> makefile('rb', None)
333 s> HTTP/1.1 200 OK\r\n
334 s> Server: testing stub value\r\n
335 s> Date: $HTTP_DATE$\r\n
336 s> Content-Type: application/mercurial-exp-framing-0005\r\n
337 s> Transfer-Encoding: chunked\r\n
338 s> \r\n
339 s> 13\r\n
340 s> \x0b\x00\x00\x01\x00\x02\x011
341 s> \xa1FstatusBok
342 s> \r\n
343 received frame(size=11; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=continuation)
344 s> 63\r\n
345 s> [\x00\x00\x01\x00\x02\x001
346 s> \xa1Jtotalitems\x01\xa2DnodeT\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11Gparents\x82T3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:T\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00
347 s> \r\n
348 received frame(size=91; request=1; stream=2; streamflags=; type=command-response; flags=continuation)
349 s> 8\r\n
350 s> \x00\x00\x00\x01\x00\x02\x002
351 s> \r\n
352 s> 0\r\n
353 s> \r\n
354 received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
355 response: gen[
356 {
357 b'totalitems': 1
358 },
359 {
360 b'node': b'\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11',
361 b'parents': [
362 b'3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:',
363 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
364 ]
365 }
366 ]
367
368 Revision data is transferred upon request
369
370 $ sendhttpv2peer << EOF
371 > command changesetdata
372 > fields eval:[b'revision']
373 > nodes eval:[b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11']
374 > EOF
375 creating http peer for wire protocol version 2
376 sending changesetdata command
377 s> POST /api/exp-http-v2-0001/ro/changesetdata HTTP/1.1\r\n
378 s> Accept-Encoding: identity\r\n
379 s> accept: application/mercurial-exp-framing-0005\r\n
380 s> content-type: application/mercurial-exp-framing-0005\r\n
381 s> content-length: 79\r\n
382 s> host: $LOCALIP:$HGPORT\r\n (glob)
383 s> user-agent: Mercurial debugwireproto\r\n
384 s> \r\n
385 s> G\x00\x00\x01\x00\x01\x01\x11\xa2Dargs\xa2Ffields\x81HrevisionEnodes\x81T\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11DnameMchangesetdata
386 s> makefile('rb', None)
387 s> HTTP/1.1 200 OK\r\n
388 s> Server: testing stub value\r\n
389 s> Date: $HTTP_DATE$\r\n
390 s> Content-Type: application/mercurial-exp-framing-0005\r\n
391 s> Transfer-Encoding: chunked\r\n
392 s> \r\n
393 s> 13\r\n
394 s> \x0b\x00\x00\x01\x00\x02\x011
395 s> \xa1FstatusBok
396 s> \r\n
397 received frame(size=11; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=continuation)
398 s> 7e\r\n
399 s> v\x00\x00\x01\x00\x02\x001
400 s> \xa1Jtotalitems\x01\xa2DnodeT\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11Lrevisionsize\x18=X=1b74476799ec8318045db759b1b4bcc9b839d0aa\n
401 s> test\n
402 s> 0 0\n
403 s> a\n
404 s> \n
405 s> commit 3
406 s> \r\n
407 received frame(size=118; request=1; stream=2; streamflags=; type=command-response; flags=continuation)
408 s> 8\r\n
409 s> \x00\x00\x00\x01\x00\x02\x002
410 s> \r\n
411 s> 0\r\n
412 s> \r\n
413 received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
414 response: gen[
415 {
416 b'totalitems': 1
417 },
418 {
419 b'node': b'\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11',
420 b'revisionsize': 61
421 },
422 b'1b74476799ec8318045db759b1b4bcc9b839d0aa\ntest\n0 0\na\n\ncommit 3'
423 ]
424
425 Multiple fields can be transferred
426
427 $ sendhttpv2peer << EOF
428 > command changesetdata
429 > fields eval:[b'parents', b'revision']
430 > nodes eval:[b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11']
431 > EOF
432 creating http peer for wire protocol version 2
433 sending changesetdata command
434 s> POST /api/exp-http-v2-0001/ro/changesetdata HTTP/1.1\r\n
435 s> Accept-Encoding: identity\r\n
436 s> accept: application/mercurial-exp-framing-0005\r\n
437 s> content-type: application/mercurial-exp-framing-0005\r\n
438 s> content-length: 87\r\n
439 s> host: $LOCALIP:$HGPORT\r\n (glob)
440 s> user-agent: Mercurial debugwireproto\r\n
441 s> \r\n
442 s> O\x00\x00\x01\x00\x01\x01\x11\xa2Dargs\xa2Ffields\x82GparentsHrevisionEnodes\x81T\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11DnameMchangesetdata
443 s> makefile('rb', None)
444 s> HTTP/1.1 200 OK\r\n
445 s> Server: testing stub value\r\n
446 s> Date: $HTTP_DATE$\r\n
447 s> Content-Type: application/mercurial-exp-framing-0005\r\n
448 s> Transfer-Encoding: chunked\r\n
449 s> \r\n
450 s> 13\r\n
451 s> \x0b\x00\x00\x01\x00\x02\x011
452 s> \xa1FstatusBok
453 s> \r\n
454 received frame(size=11; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=continuation)
455 s> b1\r\n
456 s> \xa9\x00\x00\x01\x00\x02\x001
457 s> \xa1Jtotalitems\x01\xa3DnodeT\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11Gparents\x82T3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:T\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00Lrevisionsize\x18=X=1b74476799ec8318045db759b1b4bcc9b839d0aa\n
458 s> test\n
459 s> 0 0\n
460 s> a\n
461 s> \n
462 s> commit 3
463 s> \r\n
464 received frame(size=169; request=1; stream=2; streamflags=; type=command-response; flags=continuation)
465 s> 8\r\n
466 s> \x00\x00\x00\x01\x00\x02\x002
467 s> \r\n
468 s> 0\r\n
469 s> \r\n
470 received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
471 response: gen[
472 {
473 b'totalitems': 1
474 },
475 {
476 b'node': b'\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11',
477 b'parents': [
478 b'3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:',
479 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
480 ],
481 b'revisionsize': 61
482 },
483 b'1b74476799ec8318045db759b1b4bcc9b839d0aa\ntest\n0 0\na\n\ncommit 3'
484 ]
485
486 $ cat error.log
@@ -1,2649 +1,2649
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import hashlib
12 12
13 13 from .i18n import _
14 14 from .node import (
15 15 bin,
16 16 hex,
17 17 nullid,
18 18 nullrev,
19 19 )
20 20 from .thirdparty import (
21 21 attr,
22 22 )
23 23 from . import (
24 24 bookmarks as bookmod,
25 25 bundle2,
26 26 changegroup,
27 27 discovery,
28 28 error,
29 29 exchangev2,
30 30 lock as lockmod,
31 31 logexchange,
32 32 narrowspec,
33 33 obsolete,
34 34 phases,
35 35 pushkey,
36 36 pycompat,
37 37 repository,
38 38 scmutil,
39 39 sslutil,
40 40 streamclone,
41 41 url as urlmod,
42 42 util,
43 43 )
44 44 from .utils import (
45 45 stringutil,
46 46 )
47 47
48 48 urlerr = util.urlerr
49 49 urlreq = util.urlreq
50 50
51 51 _NARROWACL_SECTION = 'narrowhgacl'
52 52
53 53 # Maps bundle version human names to changegroup versions.
54 54 _bundlespeccgversions = {'v1': '01',
55 55 'v2': '02',
56 56 'packed1': 's1',
57 57 'bundle2': '02', #legacy
58 58 }
59 59
60 60 # Maps bundle version with content opts to choose which part to bundle
61 61 _bundlespeccontentopts = {
62 62 'v1': {
63 63 'changegroup': True,
64 64 'cg.version': '01',
65 65 'obsolescence': False,
66 66 'phases': False,
67 67 'tagsfnodescache': False,
68 68 'revbranchcache': False
69 69 },
70 70 'v2': {
71 71 'changegroup': True,
72 72 'cg.version': '02',
73 73 'obsolescence': False,
74 74 'phases': False,
75 75 'tagsfnodescache': True,
76 76 'revbranchcache': True
77 77 },
78 78 'packed1' : {
79 79 'cg.version': 's1'
80 80 }
81 81 }
82 82 _bundlespeccontentopts['bundle2'] = _bundlespeccontentopts['v2']
83 83
84 84 _bundlespecvariants = {"streamv2": {"changegroup": False, "streamv2": True,
85 85 "tagsfnodescache": False,
86 86 "revbranchcache": False}}
87 87
88 88 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
89 89 _bundlespecv1compengines = {'gzip', 'bzip2', 'none'}
90 90
91 91 @attr.s
92 92 class bundlespec(object):
93 93 compression = attr.ib()
94 94 wirecompression = attr.ib()
95 95 version = attr.ib()
96 96 wireversion = attr.ib()
97 97 params = attr.ib()
98 98 contentopts = attr.ib()
99 99
100 100 def parsebundlespec(repo, spec, strict=True):
101 101 """Parse a bundle string specification into parts.
102 102
103 103 Bundle specifications denote a well-defined bundle/exchange format.
104 104 The content of a given specification should not change over time in
105 105 order to ensure that bundles produced by a newer version of Mercurial are
106 106 readable from an older version.
107 107
108 108 The string currently has the form:
109 109
110 110 <compression>-<type>[;<parameter0>[;<parameter1>]]
111 111
112 112 Where <compression> is one of the supported compression formats
113 113 and <type> is (currently) a version string. A ";" can follow the type and
114 114 all text afterwards is interpreted as URI encoded, ";" delimited key=value
115 115 pairs.
116 116
117 117 If ``strict`` is True (the default) <compression> is required. Otherwise,
118 118 it is optional.
119 119
120 120 Returns a bundlespec object of (compression, version, parameters).
121 121 Compression will be ``None`` if not in strict mode and a compression isn't
122 122 defined.
123 123
124 124 An ``InvalidBundleSpecification`` is raised when the specification is
125 125 not syntactically well formed.
126 126
127 127 An ``UnsupportedBundleSpecification`` is raised when the compression or
128 128 bundle type/version is not recognized.
129 129
130 130 Note: this function will likely eventually return a more complex data
131 131 structure, including bundle2 part information.
132 132 """
133 133 def parseparams(s):
134 134 if ';' not in s:
135 135 return s, {}
136 136
137 137 params = {}
138 138 version, paramstr = s.split(';', 1)
139 139
140 140 for p in paramstr.split(';'):
141 141 if '=' not in p:
142 142 raise error.InvalidBundleSpecification(
143 143 _('invalid bundle specification: '
144 144 'missing "=" in parameter: %s') % p)
145 145
146 146 key, value = p.split('=', 1)
147 147 key = urlreq.unquote(key)
148 148 value = urlreq.unquote(value)
149 149 params[key] = value
150 150
151 151 return version, params
152 152
153 153
154 154 if strict and '-' not in spec:
155 155 raise error.InvalidBundleSpecification(
156 156 _('invalid bundle specification; '
157 157 'must be prefixed with compression: %s') % spec)
158 158
159 159 if '-' in spec:
160 160 compression, version = spec.split('-', 1)
161 161
162 162 if compression not in util.compengines.supportedbundlenames:
163 163 raise error.UnsupportedBundleSpecification(
164 164 _('%s compression is not supported') % compression)
165 165
166 166 version, params = parseparams(version)
167 167
168 168 if version not in _bundlespeccgversions:
169 169 raise error.UnsupportedBundleSpecification(
170 170 _('%s is not a recognized bundle version') % version)
171 171 else:
172 172 # Value could be just the compression or just the version, in which
173 173 # case some defaults are assumed (but only when not in strict mode).
174 174 assert not strict
175 175
176 176 spec, params = parseparams(spec)
177 177
178 178 if spec in util.compengines.supportedbundlenames:
179 179 compression = spec
180 180 version = 'v1'
181 181 # Generaldelta repos require v2.
182 182 if 'generaldelta' in repo.requirements:
183 183 version = 'v2'
184 184 # Modern compression engines require v2.
185 185 if compression not in _bundlespecv1compengines:
186 186 version = 'v2'
187 187 elif spec in _bundlespeccgversions:
188 188 if spec == 'packed1':
189 189 compression = 'none'
190 190 else:
191 191 compression = 'bzip2'
192 192 version = spec
193 193 else:
194 194 raise error.UnsupportedBundleSpecification(
195 195 _('%s is not a recognized bundle specification') % spec)
196 196
197 197 # Bundle version 1 only supports a known set of compression engines.
198 198 if version == 'v1' and compression not in _bundlespecv1compengines:
199 199 raise error.UnsupportedBundleSpecification(
200 200 _('compression engine %s is not supported on v1 bundles') %
201 201 compression)
202 202
203 203 # The specification for packed1 can optionally declare the data formats
204 204 # required to apply it. If we see this metadata, compare against what the
205 205 # repo supports and error if the bundle isn't compatible.
206 206 if version == 'packed1' and 'requirements' in params:
207 207 requirements = set(params['requirements'].split(','))
208 208 missingreqs = requirements - repo.supportedformats
209 209 if missingreqs:
210 210 raise error.UnsupportedBundleSpecification(
211 211 _('missing support for repository features: %s') %
212 212 ', '.join(sorted(missingreqs)))
213 213
214 214 # Compute contentopts based on the version
215 215 contentopts = _bundlespeccontentopts.get(version, {}).copy()
216 216
217 217 # Process the variants
218 218 if "stream" in params and params["stream"] == "v2":
219 219 variant = _bundlespecvariants["streamv2"]
220 220 contentopts.update(variant)
221 221
222 222 engine = util.compengines.forbundlename(compression)
223 223 compression, wirecompression = engine.bundletype()
224 224 wireversion = _bundlespeccgversions[version]
225 225
226 226 return bundlespec(compression, wirecompression, version, wireversion,
227 227 params, contentopts)
228 228
229 229 def readbundle(ui, fh, fname, vfs=None):
230 230 header = changegroup.readexactly(fh, 4)
231 231
232 232 alg = None
233 233 if not fname:
234 234 fname = "stream"
235 235 if not header.startswith('HG') and header.startswith('\0'):
236 236 fh = changegroup.headerlessfixup(fh, header)
237 237 header = "HG10"
238 238 alg = 'UN'
239 239 elif vfs:
240 240 fname = vfs.join(fname)
241 241
242 242 magic, version = header[0:2], header[2:4]
243 243
244 244 if magic != 'HG':
245 245 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
246 246 if version == '10':
247 247 if alg is None:
248 248 alg = changegroup.readexactly(fh, 2)
249 249 return changegroup.cg1unpacker(fh, alg)
250 250 elif version.startswith('2'):
251 251 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
252 252 elif version == 'S1':
253 253 return streamclone.streamcloneapplier(fh)
254 254 else:
255 255 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
256 256
257 257 def getbundlespec(ui, fh):
258 258 """Infer the bundlespec from a bundle file handle.
259 259
260 260 The input file handle is seeked and the original seek position is not
261 261 restored.
262 262 """
263 263 def speccompression(alg):
264 264 try:
265 265 return util.compengines.forbundletype(alg).bundletype()[0]
266 266 except KeyError:
267 267 return None
268 268
269 269 b = readbundle(ui, fh, None)
270 270 if isinstance(b, changegroup.cg1unpacker):
271 271 alg = b._type
272 272 if alg == '_truncatedBZ':
273 273 alg = 'BZ'
274 274 comp = speccompression(alg)
275 275 if not comp:
276 276 raise error.Abort(_('unknown compression algorithm: %s') % alg)
277 277 return '%s-v1' % comp
278 278 elif isinstance(b, bundle2.unbundle20):
279 279 if 'Compression' in b.params:
280 280 comp = speccompression(b.params['Compression'])
281 281 if not comp:
282 282 raise error.Abort(_('unknown compression algorithm: %s') % comp)
283 283 else:
284 284 comp = 'none'
285 285
286 286 version = None
287 287 for part in b.iterparts():
288 288 if part.type == 'changegroup':
289 289 version = part.params['version']
290 290 if version in ('01', '02'):
291 291 version = 'v2'
292 292 else:
293 293 raise error.Abort(_('changegroup version %s does not have '
294 294 'a known bundlespec') % version,
295 295 hint=_('try upgrading your Mercurial '
296 296 'client'))
297 297 elif part.type == 'stream2' and version is None:
298 298 # A stream2 part requires to be part of a v2 bundle
299 299 version = "v2"
300 300 requirements = urlreq.unquote(part.params['requirements'])
301 301 splitted = requirements.split()
302 302 params = bundle2._formatrequirementsparams(splitted)
303 303 return 'none-v2;stream=v2;%s' % params
304 304
305 305 if not version:
306 306 raise error.Abort(_('could not identify changegroup version in '
307 307 'bundle'))
308 308
309 309 return '%s-%s' % (comp, version)
310 310 elif isinstance(b, streamclone.streamcloneapplier):
311 311 requirements = streamclone.readbundle1header(fh)[2]
312 312 formatted = bundle2._formatrequirementsparams(requirements)
313 313 return 'none-packed1;%s' % formatted
314 314 else:
315 315 raise error.Abort(_('unknown bundle type: %s') % b)
316 316
317 317 def _computeoutgoing(repo, heads, common):
318 318 """Computes which revs are outgoing given a set of common
319 319 and a set of heads.
320 320
321 321 This is a separate function so extensions can have access to
322 322 the logic.
323 323
324 324 Returns a discovery.outgoing object.
325 325 """
326 326 cl = repo.changelog
327 327 if common:
328 328 hasnode = cl.hasnode
329 329 common = [n for n in common if hasnode(n)]
330 330 else:
331 331 common = [nullid]
332 332 if not heads:
333 333 heads = cl.heads()
334 334 return discovery.outgoing(repo, common, heads)
335 335
336 336 def _forcebundle1(op):
337 337 """return true if a pull/push must use bundle1
338 338
339 339 This function is used to allow testing of the older bundle version"""
340 340 ui = op.repo.ui
341 341 # The goal is this config is to allow developer to choose the bundle
342 342 # version used during exchanged. This is especially handy during test.
343 343 # Value is a list of bundle version to be picked from, highest version
344 344 # should be used.
345 345 #
346 346 # developer config: devel.legacy.exchange
347 347 exchange = ui.configlist('devel', 'legacy.exchange')
348 348 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
349 349 return forcebundle1 or not op.remote.capable('bundle2')
350 350
class pushoperation(object):
    """A object that represent a single push operation

    Its purpose is to carry push related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.

    Most attributes start as None/empty and are filled in by the discovery
    steps (see ``pushdiscoveryorder``) and the push itself.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=(), pushvars=None):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmark explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # step already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote topological heads before the push
        self.remoteheads = None
        # Details of the remote branch pre and post push
        #
        # mapping: {'branch': ([remoteheads],
        #                      [newheads],
        #                      [unsyncedheads],
        #                      [discardedheads])}
        # - branch: the branch name
        # - remoteheads: the list of remote heads known locally
        #                None if the branch is new
        # - newheads: the new remote heads (known locally) with outgoing pushed
        # - unsyncedheads: the list of remote heads unknown locally.
        # - discardedheads: the list of remote heads made obsolete by the push
        self.pushbranchmap = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # summary of the remote phase situation
        self.remotephases = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}
        # an iterable of pushvars or None
        self.pushvars = pushvars

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # not target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        # cgresult truthy means the changegroup was actually applied remotely
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads

    # mapping of message used when pushing bookmark
    # keyed by action name; value is (success message, failure message),
    # each taking the bookmark name via %s
    bookmsgmap = {'update': (_("updating bookmark %s\n"),
                             _('updating bookmark %s failed!\n')),
                  'export': (_("exporting bookmark %s\n"),
                             _('exporting bookmark %s failed!\n')),
                  'delete': (_("deleting remote bookmark %s\n"),
                             _('deleting remote bookmark %s failed!\n')),
                  }
477 477
478 478
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
         opargs=None):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote.

    Returns the ``pushoperation`` object; its ``cgresult`` attribute holds
    the integer changegroup result:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    if opargs is None:
        opargs = {}
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
                           **pycompat.strkwargs(opargs))
    # a local peer lets us check requirement compatibility up front
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    if not pushop.remote.canpush():
        raise error.Abort(_("destination does not support push"))

    if not pushop.remote.capable('unbundle'):
        raise error.Abort(_('cannot push: destination does not support the '
                            'unbundle wire protocol command'))

    # get lock as we might write phase data
    wlock = lock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if (not _forcebundle1(pushop)) and maypushback:
            wlock = pushop.repo.wlock()
        lock = pushop.repo.lock()
        pushop.trmanager = transactionmanager(pushop.repo,
                                              'push-response',
                                              pushop.remote.url())
    except error.LockUnavailable as err:
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)

    # any of the three context managers may be None; nullcontextmanager
    # stands in so the with-statement stays valid
    with wlock or util.nullcontextmanager(), \
            lock or util.nullcontextmanager(), \
            pushop.trmanager or util.nullcontextmanager():
        pushop.repo.checkpush(pushop)
        _pushdiscovery(pushop)
        if not _forcebundle1(pushop):
            _pushbundle2(pushop)
        # each of the steps below is a no-op if bundle2 already handled it
        # (tracked through pushop.stepsdone)
        _pushchangeset(pushop)
        _pushsyncphase(pushop)
        _pushobsolete(pushop)
        _pushbookmark(pushop)

    if repo.ui.configbool('experimental', 'remotenames'):
        logexchange.pullremotenames(repo, remote)

    return pushop
544 544
# list of steps to perform discovery before push
# (run in this order by _pushdiscovery; populated via @pushdiscovery)
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}
552 552
def pushdiscovery(stepname):
    """decorator registering a discovery step to run before push

    The decorated function is recorded in the step-name -> function mapping
    and appended to the ordered step list, so decoration order determines
    execution order.

    Only use this for brand new steps; to wrap an existing step from an
    extension, mutate the pushdiscovery dictionary directly."""
    def register(func):
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return register
568 568
def _pushdiscovery(pushop):
    """Run every registered discovery step, in registration order."""
    for name in pushdiscoveryorder:
        pushdiscoverymapping[name](pushop)
574 574
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changeset that need to be pushed

    Fills in pushop.outgoing, pushop.remoteheads and pushop.incoming from
    the common/incoming/outgoing discovery protocol."""
    fci = discovery.findcommonincoming
    # when specific revs were requested, limit discovery to their ancestors
    if pushop.revs:
        commoninc = fci(pushop.repo, pushop.remote, force=pushop.force,
                        ancestorsof=pushop.revs)
    else:
        commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    fco = discovery.findcommonoutgoing
    # reuse the incoming discovery result to avoid a second round-trip
    outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
                   commoninc=commoninc, force=pushop.force)
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
591 591
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = listkeys(pushop.remote, 'phases')

    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases    # server supports phases
        and not pushop.outgoing.missing # no changesets to be pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset are to be pushed
        # - and remote is publishing
        # We may be in issue 3781 case!
        # NOTE(review): the equivalent check in _pushsyncphase cites issue
        # 3871 -- one of the two numbers is likely a typo; confirm upstream.
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        pushop.outdatedphases = []
        pushop.fallbackoutdatedphases = []
        return

    pushop.remotephases = phases.remotephasessummary(pushop.repo,
                                                     pushop.fallbackheads,
                                                     remotephases)
    droots = pushop.remotephases.draftroots

    extracond = ''
    if not pushop.remotephases.publishing:
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote by public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changeset we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
644 644
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """Collect the obsolescence markers relevant to the pushed changesets.

    Bails out early when obsmarker exchange is disabled, when the local
    store holds no markers, or when the remote does not advertise the
    'obsolete' namespace.
    """
    repo = pushop.repo
    if not obsolete.isenabled(repo, obsolete.exchangeopt):
        return
    if not repo.obsstore:
        return
    if 'obsolete' not in listkeys(pushop.remote, 'namespaces'):
        return
    # very naive computation, that can be quite expensive on big repo.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = repo.obsstore.relevantmarkers(nodes)
661 661
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """Compare local and remote bookmarks and record the moves to push."""
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        # restrict bookmark moves to ancestors of the pushed revs
        revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)

    remotebookmark = listkeys(remote, 'bookmarks')

    # bookmarks explicitly named on the command line (names expanded)
    explicit = set([repo._bookmarks.expandname(bookmark)
                    for bookmark in pushop.bookmarks])

    remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
    comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)

    def safehex(x):
        # None marks an absent bookmark; keep it as-is
        if x is None:
            return x
        return hex(x)

    def hexifycompbookmarks(bookmarks):
        return [(b, safehex(scid), safehex(dcid))
                for (b, scid, dcid) in bookmarks]

    comp = [hexifycompbookmarks(marks) for marks in comp]
    return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
692 692
def _processcompared(pushop, pushed, explicit, remotebms, comp):
    """take decision on bookmark to pull from the remote bookmark

    Exist to help extensions who want to alter this behavior.

    ``pushed`` is the ancestor set of pushed revs (empty => no restriction);
    ``explicit`` is the set of bookmark names explicitly requested;
    ``comp`` is the 8-tuple returned by bookmod.comparebookmarks.
    Appends (name, old, new) triples to pushop.outbookmarks.
    """
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp

    repo = pushop.repo

    # fast-forward moves are pushed when within the pushed set (or always
    # when no rev restriction applies)
    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not pushed or repo[scid].rev() in pushed:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmark
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmark
    for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmark to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    # anything left in explicit was never matched -> warn and flag failure
    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        pushop.ui.warn(_('bookmark %s does not exist on the local '
                         'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
736 736
def _pushcheckoutgoing(pushop):
    """Validate the outgoing set before pushing.

    Returns False when there is nothing to push; aborts when an outgoing
    head is obsolete/unstable (unless --force); otherwise runs head checks
    and returns True."""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # this message are here for 80 char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mspd = _("push includes phase-divergent changeset: %s!")
            mscd = _("push includes content-divergent changeset: %s!")
            mst = {"orphan": _("push includes orphan changeset: %s!"),
                   "phase-divergent": mspd,
                   "content-divergent": mscd}
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise error.Abort(mso % ctx)
                elif ctx.isunstable():
                    # TODO print more than one instability in the abort
                    # message
                    raise error.Abort(mst[ctx.instabilities()[0]] % ctx)

    discovery.checkheads(pushop)
    return True
771 771
# List of names of steps to perform for an outgoing bundle2, order matters.
# (populated by the @b2partsgenerator decorator)
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}
779 779
def b2partsgenerator(stepname, idx=None):
    """decorator registering a bundle2 part generator

    Records the decorated function in the step-name -> function mapping and
    inserts the step into the ordered list -- at the end by default, or at
    position ``idx`` when given. Decoration order therefore matters.

    Only use this for brand new steps; to wrap an existing step from an
    extension, mutate the b2partsgenmapping dictionary directly."""
    def register(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        # inserting at len() is equivalent to appending
        position = len(b2partsgenorder) if idx is None else idx
        b2partsgenorder.insert(position, stepname)
        return func
    return register
798 798
def _pushb2ctxcheckheads(pushop, bundler):
    """Generate race condition checking parts

    Exists as an independent function to aid extensions
    """
    # * 'force' do not check for push race,
    # * if we don't push anything, there are nothing to check.
    if not pushop.force and pushop.outgoing.missingheads:
        allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
        emptyremote = pushop.pushbranchmap is None
        if not allowunrelated or emptyremote:
            # legacy check: the full remote head set must be unchanged
            bundler.newpart('check:heads', data=iter(pushop.remoteheads))
        else:
            # finer-grained check: only heads actually affected by the push
            affected = set()
            for branch, heads in pushop.pushbranchmap.iteritems():
                remoteheads, newheads, unsyncedheads, discardedheads = heads
                if remoteheads is not None:
                    remote = set(remoteheads)
                    affected |= set(discardedheads) & remote
                    affected |= remote - set(newheads)
            if affected:
                data = iter(sorted(affected))
                bundler.newpart('check:updated-heads', data=data)
822 822
823 823 def _pushing(pushop):
824 824 """return True if we are pushing anything"""
825 825 return bool(pushop.outgoing.missing
826 826 or pushop.outdatedphases
827 827 or pushop.outobsmarkers
828 828 or pushop.outbookmarks)
829 829
@b2partsgenerator('check-bookmarks')
def _pushb2checkbookmarks(pushop, bundler):
    """insert bookmark move checking

    Adds a 'check:bookmarks' part listing the expected current (old) value
    of every bookmark we are about to move, so the server can detect races.
    Skipped with --force or when the server lacks the capability."""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    hasbookmarkcheck = 'bookmarks' in b2caps
    if not (pushop.outbookmarks and hasbookmarkcheck):
        return
    data = []
    for book, old, new in pushop.outbookmarks:
        # outbookmarks stores hex nodes; the binary part wants raw nodes
        old = bin(old)
        data.append((book, old))
    checkdata = bookmod.binaryencode(data)
    bundler.newpart('check:bookmarks', data=checkdata)
845 845
@b2partsgenerator('check-phases')
def _pushb2checkphases(pushop, bundler):
    """insert phase move checking

    Adds a 'check:phases' part recording the remote phase state observed
    during discovery, so the server can detect concurrent phase changes.
    Skipped with --force or when the server lacks binary phase support."""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    hasphaseheads = 'heads' in b2caps.get('phases', ())
    if pushop.remotephases is not None and hasphaseheads:
        # check that the remote phase has not changed
        checks = [[] for p in phases.allphases]
        checks[phases.public].extend(pushop.remotephases.publicheads)
        checks[phases.draft].extend(pushop.remotephases.draftroots)
        if any(checks):
            for nodes in checks:
                nodes.sort()
            checkdata = phases.binaryencode(checks)
            bundler.newpart('check:phases', data=checkdata)
863 863
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    Returns a reply handler that extracts that result from the server reply.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)

    _pushb2ctxcheckheads(pushop, bundler)

    b2caps = bundle2.bundle2caps(pushop.remote)
    # default to changegroup version '01' when the server advertises none
    version = '01'
    cgversions = b2caps.get('changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(
                          pushop.repo)]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
    cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
                                      'push')
    cgpart = bundler.newpart('changegroup', data=cgstream)
    if cgversions:
        cgpart.addparam('version', version)
    if 'treemanifest' in pushop.repo.requirements:
        cgpart.addparam('treemanifest', '1')
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
903 903
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2

    Dispatches to the binary 'phase-heads' part when supported (and not
    forced into legacy mode), falling back to pushkey parts otherwise."""
    if 'phases' in pushop.stepsdone:
        return
    ui = pushop.repo.ui
    b2caps = bundle2.bundle2caps(pushop.remote)

    legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
    hasphaseheads = 'heads' in b2caps.get('phases', ())

    if hasphaseheads and not legacyphase:
        return _pushb2phaseheads(pushop, bundler)
    if 'pushkey' in b2caps:
        return _pushb2phasespushkey(pushop, bundler)
920 920
def _pushb2phaseheads(pushop, bundler):
    """push phase information through a bundle2 - binary part

    Adds a 'phase-heads' part listing the heads that the push turns public.
    """
    pushop.stepsdone.add('phases')
    if pushop.outdatedphases:
        updates = [[] for p in phases.allphases]
        # use the symbolic phase index instead of a bare 0 so this stays
        # consistent with _pushb2checkphases and robust against any change
        # to the ordering of phases.allphases
        updates[phases.public].extend(h.node() for h in pushop.outdatedphases)
        phasedata = phases.binaryencode(updates)
        bundler.newpart('phase-heads', data=phasedata)
929 929
def _pushb2phasespushkey(pushop, bundler):
    """push phase information through a bundle2 - pushkey part

    Emits one mandatory pushkey part per head being turned public, and
    returns a reply handler that reports per-part success/failure."""
    pushop.stepsdone.add('phases')
    # (part id, node) pairs, used to map failures back to a node
    part2node = []

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_('updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc('%d' % phases.draft))
        part.addparam('new', enc('%d' % phases.public))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
964 964
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """Add an obsolescence-markers part when both sides share a version."""
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        # no marker format understood by both ends; skip silently
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        bundle2.buildobsmarkerspart(bundler, sorted(pushop.outobsmarkers))
976 976
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2

    Prefers the binary 'bookmarks' part; falls back to pushkey parts when
    the server lacks it or when legacy exchange is forced."""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)

    legacy = pushop.repo.ui.configlist('devel', 'legacy.exchange')
    uselegacy = 'bookmarks' in legacy

    if 'bookmarks' in b2caps and not uselegacy:
        return _pushb2bookmarkspart(pushop, bundler)
    if 'pushkey' in b2caps:
        return _pushb2bookmarkspushkey(pushop, bundler)
991 991
992 992 def _bmaction(old, new):
993 993 """small utility for bookmark pushing"""
994 994 if not old:
995 995 return 'export'
996 996 elif not new:
997 997 return 'delete'
998 998 return 'update'
999 999
def _pushb2bookmarkspart(pushop, bundler):
    """Push bookmark moves via the binary bundle2 'bookmarks' part.

    Returns a reply handler that prints one status line per bookmark on
    success (the part is mandatory, so failure aborts the whole bundle)."""
    pushop.stepsdone.add('bookmarks')
    if not pushop.outbookmarks:
        return

    allactions = []
    data = []
    for book, old, new in pushop.outbookmarks:
        # outbookmarks stores hex nodes; the binary part wants raw nodes
        new = bin(new)
        data.append((book, new))
        allactions.append((book, _bmaction(old, new)))
    checkdata = bookmod.binaryencode(data)
    bundler.newpart('bookmarks', data=checkdata)

    def handlereply(op):
        ui = pushop.ui
        # if success
        for book, action in allactions:
            ui.status(bookmsgmap[action][0] % book)

    return handlereply
1021 1021
def _pushb2bookmarkspushkey(pushop, bundler):
    """Push bookmark moves via one bundle2 pushkey part per bookmark.

    Returns a reply handler that prints per-bookmark success/failure and
    downgrades pushop.bkresult on any failure."""
    pushop.stepsdone.add('bookmarks')
    # (part id, bookmark name, action) triples to map replies back
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for part we did not generated
        assert False

    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
                    if pushop.bkresult is not None:
                        pushop.bkresult = 1
    return handlereply
1066 1066
@b2partsgenerator('pushvars', idx=0)
def _getbundlesendvars(pushop, bundler):
    '''send shellvars via bundle2'''
    pushvars = pushop.pushvars
    if not pushvars:
        return

    shellvars = {}
    for raw in pushvars:
        if '=' not in raw:
            msg = ("unable to parse variable '%s', should follow "
                   "'KEY=VALUE' or 'KEY=' format")
            raise error.Abort(msg % raw)
        key, value = raw.split('=', 1)
        shellvars[key] = value

    part = bundler.newpart('pushvars')
    # advisory params: an older server simply ignores them
    for key, value in shellvars.iteritems():
        part.addparam(key, value, mandatory=False)
1085 1085
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback,
                                                      role='client'))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    # run every registered part generator; generators that expect a server
    # reply return a callable handler
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    # (the replycaps part added above is always present, hence <= 1)
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            with pushop.remote.commandexecutor() as e:
                reply = e.callcommand('unbundle', {
                    'bundle': stream,
                    'heads': ['force'],
                    'url': pushop.remote.url(),
                }).result()
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        except bundle2.AbortFromPart as exc:
            pushop.ui.status(_('remote: %s\n') % exc)
            if exc.hint is not None:
                pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
            raise error.Abort(_('push failed on remote'))
    except error.PushkeyFailed as exc:
        # delegate to the failure callback registered for this part, if any
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)
1139 1139
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo

    Legacy (non-bundle2) path; a no-op when bundle2 already pushed the
    changesets (tracked via pushop.stepsdone)."""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return

    # Should have verified this in push().
    assert pushop.remote.capable('unbundle')

    pushop.repo.prepushoutgoinghooks(pushop)
    outgoing = pushop.outgoing
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
                                         fastpath=True, bundlecaps=bundlecaps)
    else:
        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
                                         'push', bundlecaps=bundlecaps)

    # apply changegroup to remote
    # local repo finds heads on server, finds out what
    # revs it must push. once revs transferred, if server
    # finds it has different heads (someone else won
    # commit/push race), server aborts.
    if pushop.force:
        remoteheads = ['force']
    else:
        remoteheads = pushop.remoteheads
    # ssh: return remote's addchangegroup()
    # http: return remote's addchangegroup() or 0 for error
    pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                             pushop.repo.url())
1179 1179
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely

    Applies the remote's phase view locally, then pushes any outstanding
    phase moves via pushkey (unless bundle2 already handled them)."""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = listkeys(pushop.remote, 'phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            with pushop.remote.commandexecutor() as e:
                r = e.callcommand('pushkey', {
                    'namespace': 'phases',
                    'key': newremotehead.hex(),
                    'old': '%d' % phases.draft,
                    'new': '%d' % phases.public
                }).result()

            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
1239 1239
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        # A transaction manager is available: actually advance the boundary.
        phases.advanceboundary(pushop.repo, pushop.trmanager.transaction(),
                               phase, nodes)
        return
    # repo is not locked, do not change any phases!
    # Informs the user that phases should have been moved when
    # applicable.
    wouldmove = [n for n in nodes if phase < pushop.repo[n].phase()]
    phasestr = phases.phasenames[phase]
    if wouldmove:
        pushop.ui.status(_('cannot lock source repo, skipping '
                           'local %s phase update\n') % phasestr)
1256 1256
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote

    Markers are encoded into pushkey chunks and sent one ``pushkey`` call per
    chunk; failures are reported but do not abort the push.
    """
    if 'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        pushop.ui.debug('try to push obsolete markers to remote\n')
        rslts = []
        remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        if [r for r in rslts if not r]:
            # at least one pushkey call reported failure
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)
1275 1275
def _pushbookmark(pushop):
    """Update bookmark position on remote

    Each outgoing bookmark is sent as an individual ``pushkey`` call in the
    ``bookmarks`` namespace; an empty ``old`` means creation ("export") and
    an empty ``new`` means deletion.
    """
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'

        with remote.commandexecutor() as e:
            r = e.callcommand('pushkey', {
                'namespace': 'bookmarks',
                'key': b,
                'old': old,
                'new': new,
            }).result()

        if r:
            # success: pick the success message for this action
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
            # discovery can have set the value form invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1
1306 1306
class pulloperation(object):
    """An object that represents a single pull operation.

    Its purpose is to carry pull-related state and very common operations.

    A new instance should be created at the beginning of each pull and
    discarded afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None,
                 includepats=None, excludepats=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmark pulled explicitly
        self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
                                  for bookmark in bookmarks]
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager
        self.trmanager = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False
        # Set of file patterns to include.
        self.includepats = includepats
        # Set of file patterns to exclude.
        self.excludepats = excludepats

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changeset target by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    @util.propertycache
    def canusebundle2(self):
        # True unless configuration forces the legacy bundle1 exchange
        return not _forcebundle1(self)

    @util.propertycache
    def remotebundle2caps(self):
        # bundle2 capabilities advertised by the remote peer
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
1382 1382
class transactionmanager(util.transactional):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""
    def __init__(self, repo, source, url):
        # repo the transaction will be opened on
        self.repo = repo
        # operation source (e.g. 'pull'), exposed to hooks
        self.source = source
        # remote URL, exposed to hooks (password is hidden in the name)
        self.url = url
        # lazily-created transaction; None until first transaction() call
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            self._tr = self.repo.transaction(trname)
            self._tr.hookargs['source'] = self.source
            self._tr.hookargs['url'] = self.url
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()
1412 1412
def listkeys(remote, namespace):
    """Fetch the pushkey mapping for ``namespace`` from ``remote``."""
    with remote.commandexecutor() as executor:
        fut = executor.callcommand('listkeys', {'namespace': namespace})
        return fut.result()
1416 1416
def _fullpullbundle2(repo, pullop):
    # The server may send a partial reply, i.e. when inlining
    # pre-computed bundles. In that case, update the common
    # set based on the results and pull another bundle.
    #
    # There are two indicators that the process is finished:
    # - no changeset has been added, or
    # - all remote heads are known locally.
    # The head check must use the unfiltered view as obsoletion
    # markers can hide heads.
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    def headsofdiff(h1, h2):
        """Returns heads(h1 % h2)"""
        res = unfi.set('heads(%ln %% %ln)', h1, h2)
        return set(ctx.node() for ctx in res)
    def headsofunion(h1, h2):
        """Returns heads((h1 + h2) - null)"""
        res = unfi.set('heads((%ln + %ln - null))', h1, h2)
        return set(ctx.node() for ctx in res)
    while True:
        old_heads = unficl.heads()
        clstart = len(unficl)
        _pullbundle2(pullop)
        if repository.NARROW_REQUIREMENT in repo.requirements:
            # XXX narrow clones filter the heads on the server side during
            # XXX getbundle and result in partial replies as well.
            # XXX Disable pull bundles in this case as band aid to avoid
            # XXX extra round trips.
            break
        if clstart == len(unficl):
            # nothing new was added: we are done
            break
        if all(unficl.hasnode(n) for n in pullop.rheads):
            # every remote head is now known locally: we are done
            break
        new_heads = headsofdiff(unficl.heads(), old_heads)
        pullop.common = headsofunion(new_heads, pullop.common)
        pullop.rheads = set(pullop.rheads) - pullop.common
1454 1454
def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
         streamclonerequested=None, includepats=None, excludepats=None):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.
    ``includepats`` and ``excludepats`` define explicit file patterns to
    include and exclude in storage, respectively. If not defined, narrow
    patterns from the repo instance are used, if available.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}

    # We allow the narrow patterns to be passed in explicitly to provide more
    # flexibility for API consumers.
    if includepats or excludepats:
        includepats = includepats or set()
        excludepats = excludepats or set()
    else:
        includepats, excludepats = repo.narrowpats

    narrowspec.validatepatterns(includepats)
    narrowspec.validatepatterns(excludepats)

    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           streamclonerequested=streamclonerequested,
                           includepats=includepats, excludepats=excludepats,
                           **pycompat.strkwargs(opargs))

    peerlocal = pullop.remote.local()
    if peerlocal:
        missing = set(peerlocal.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
    with repo.wlock(), repo.lock(), pullop.trmanager:
        # Use the modern wire protocol, if available.
        if remote.capable('command-changesetdata'):
            exchangev2.pull(pullop)
        else:
            # This should ideally be in _pullbundle2(). However, it needs to run
            # before discovery to avoid extra work.
            _maybeapplyclonebundle(pullop)
            streamclone.maybeperformlegacystreamclone(pullop)
            _pulldiscovery(pullop)
            if pullop.canusebundle2:
                _fullpullbundle2(repo, pullop)
            _pullchangeset(pullop)
            _pullphase(pullop)
            _pullbookmarks(pullop)
            _pullobsolete(pullop)

        # storing remotenames
        if repo.ui.configbool('experimental', 'remotenames'):
            logexchange.pullremotenames(repo, remote)

    return pullop
1531 1531
# list of steps to perform discovery before pull (step names, in the order
# they were registered via the @pulldiscovery decorator)
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}
1539 1539
def pulldiscovery(stepname):
    """decorator for function performing discovery before pull

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated function will be added in order (this
    may matter).

    You can only use this decorator for a new step, if you want to wrap a step
    from an extension, change the pulldiscovery dictionary directly."""
    def register(func):
        # refuse to silently overwrite an existing step
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return register
1555 1555
def _pulldiscovery(pullop):
    """Run every registered discovery step, in registration order."""
    for name in pulldiscoveryorder:
        pulldiscoverymapping[name](pullop)
1561 1561
@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in bundle1 case

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        # bookmark data was supplied by the caller; nothing to fetch
        return
    if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but lets be nice with
        # new implementation.
        return
    books = listkeys(pullop.remote, 'bookmarks')
    pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
1576 1576
1577 1577
@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; this will change to handle
    all discovery at some point."""
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote head is filtered locally, put it back in common.
        #
        # This is a hackish solution to catch most of the "common but locally
        # hidden" situation. We do not perform discovery on the unfiltered
        # repository because it ends up doing a pathological amount of round
        # trips for a huge amount of changesets we do not care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but is not including a remote head, we'll not be able to
        # detect it.
        scommon = set(common)
        for n in rheads:
            if n in nm:
                if n not in scommon:
                    common.append(n)
        if set(rheads).issubset(set(common)):
            # every remote head is already common: nothing to fetch
            fetch = []
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
1610 1610
def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup.

    Builds the ``getbundle`` argument dict (changegroup, phases, bookmarks,
    obsmarkers, stream), issues the command, then processes the returned
    bundle2 stream and records the results on ``pullop``.
    """
    kwargs = {'bundlecaps': caps20to10(pullop.repo, role='client')}

    # make ui easier to access
    ui = pullop.repo.ui

    # At the moment we don't do stream clones over bundle2. If that is
    # implemented then here's where the check for that will go.
    streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]

    # declare pull perimeters
    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads

    if streaming:
        kwargs['cg'] = False
        kwargs['stream'] = True
        pullop.stepsdone.add('changegroup')
        pullop.stepsdone.add('phases')

    else:
        # pulling changegroup
        pullop.stepsdone.add('changegroup')

        kwargs['cg'] = pullop.fetch

        legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
        hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
        if (not legacyphase and hasbinaryphase):
            # prefer the binary phase-heads bundle2 part over listkeys
            kwargs['phases'] = True
            pullop.stepsdone.add('phases')

        if 'listkeys' in pullop.remotebundle2caps:
            if 'phases' not in pullop.stepsdone:
                kwargs['listkeys'] = ['phases']

    bookmarksrequested = False
    legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange')
    hasbinarybook = 'bookmarks' in pullop.remotebundle2caps

    if pullop.remotebookmarks is not None:
        pullop.stepsdone.add('request-bookmarks')

    if ('request-bookmarks' not in pullop.stepsdone
        and pullop.remotebookmarks is None
        and not legacybookmark and hasbinarybook):
        kwargs['bookmarks'] = True
        bookmarksrequested = True

    if 'listkeys' in pullop.remotebundle2caps:
        if 'request-bookmarks' not in pullop.stepsdone:
            # make sure to always includes bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            pullop.stepsdone.add('request-bookmarks')
            kwargs.setdefault('listkeys', []).append('bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (pullop.remote.capable('clonebundles')
        and pullop.heads is None and list(pullop.common) == [nullid]):
        kwargs['cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_('streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    _pullbundle2extraprepare(pullop, kwargs)

    with pullop.remote.commandexecutor() as e:
        args = dict(kwargs)
        args['source'] = 'pull'
        bundle = e.callcommand('getbundle', args).result()

        try:
            op = bundle2.bundleoperation(pullop.repo, pullop.gettransaction,
                                         source='pull')
            op.modes['bookmarks'] = 'records'
            bundle2.processbundle(pullop.repo, bundle, op=op)
        except bundle2.AbortFromPart as exc:
            pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
            raise error.Abort(_('pull failed on remote'), hint=exc.hint)
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)

    if pullop.fetch:
        pullop.cgresult = bundle2.combinechangegroupresults(op)

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    if bookmarksrequested:
        books = {}
        for record in op.records['bookmarks']:
            books[record['bookmark']] = record["node"]
        pullop.remotebookmarks = books
    else:
        for namespace, value in op.records['listkeys']:
            if namespace == 'bookmarks':
                pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)

    # bookmark data were either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)
1732 1732
def _pullbundle2extraprepare(pullop, kwargs):
    """hook function so that extensions can extend the getbundle call

    ``kwargs`` is the argument dict about to be passed to ``getbundle``;
    extensions may mutate it in place. The default implementation does
    nothing.
    """
1735 1735
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo

    Legacy (non-bundle2) changegroup pull. Picks the best available wire
    command (``getbundle``, ``changegroup`` or ``changegroupsubset``) based
    on remote capabilities, then applies the result.
    """
    # We delay the open of the transaction as late as possible so we
    # don't open transaction for nothing or you break future useful
    # rollback call
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    tr = pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand('changegroup', {
                'nodes': pullop.fetch,
                'source': 'pull',
            }).result()

    elif not pullop.remote.capable('changegroupsubset'):
        raise error.Abort(_("partial pull cannot be done because "
                            "other repository doesn't support "
                            "changegroupsubset."))
    else:
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand('changegroupsubset', {
                'bases': pullop.fetch,
                'heads': pullop.heads,
                'source': 'pull',
            }).result()

    bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
                                   pullop.remote.url())
    pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
1781 1781
def _pullphase(pullop):
    """Fetch remote phase data and apply it locally (no-op if already done)."""
    if 'phases' not in pullop.stepsdone:
        _pullapplyphases(pullop, listkeys(pullop.remote, 'phases'))
1788 1788
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state

    ``remotephases`` is the remote ``phases`` pushkey mapping. Depending on
    whether the remote is publishing, the pulled subset is advanced to
    public and/or draft locally. Phases only ever advance (never retreat).
    """
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)
1823 1823
def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    repo = pullop.repo
    # Merge remote bookmark state into the local repo; explicitly pulled
    # bookmarks are passed through so they can override divergence handling.
    bookmod.updatefromremote(repo.ui, repo, pullop.remotebookmarks,
                             pullop.remote.url(), pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)
1835 1835
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` is function that return the pull transaction, creating
    one if necessary. We return the transaction to inform the calling code that
    a new transaction have been created (when applicable).

    Exists mostly to allow overriding for experimentation purpose"""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = listkeys(pullop.remote, 'obsolete')
        if 'dump0' in remoteobs:
            # the remote has at least one chunk of marker data
            tr = pullop.gettransaction()
            markers = []
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    # each 'dumpN' value is base85-encoded marker data
                    data = util.b85decode(remoteobs[key])
                    version, newmarks = obsolete._readmarkers(data)
                    markers += newmarks
            if markers:
                pullop.repo.obsstore.add(tr, markers)
        pullop.repo.invalidatevolatilesets()
    return tr
1863 1863
def applynarrowacl(repo, kwargs):
    """Apply narrow fetch access control.

    This massages the named arguments for getbundle wire protocol commands
    so requested data is filtered through access control rules.

    Returns a new argument dict; ``kwargs`` itself is not modified.
    """
    ui = repo.ui
    # TODO this assumes existence of HTTP and is a layering violation.
    username = ui.shortuser(ui.environ.get('REMOTE_USER') or ui.username())
    user_includes = ui.configlist(
        _NARROWACL_SECTION, username + '.includes',
        ui.configlist(_NARROWACL_SECTION, 'default.includes'))
    user_excludes = ui.configlist(
        _NARROWACL_SECTION, username + '.excludes',
        ui.configlist(_NARROWACL_SECTION, 'default.excludes'))
    if not user_includes:
        raise error.Abort(_("{} configuration for user {} is empty")
                          .format(_NARROWACL_SECTION, username))

    # normalize ACL entries to matcher pattern syntax ('*' means everything)
    user_includes = [
        'path:.' if p == '*' else 'path:' + p for p in user_includes]
    user_excludes = [
        'path:.' if p == '*' else 'path:' + p for p in user_excludes]

    req_includes = set(kwargs.get(r'includepats', []))
    req_excludes = set(kwargs.get(r'excludepats', []))

    req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
        req_includes, req_excludes, user_includes, user_excludes)

    if invalid_includes:
        raise error.Abort(
            _("The following includes are not accessible for {}: {}")
            .format(username, invalid_includes))

    new_args = {}
    new_args.update(kwargs)
    new_args[r'narrow'] = True
    new_args[r'includepats'] = req_includes
    if req_excludes:
        new_args[r'excludepats'] = req_excludes

    return new_args
1907 1907
1908 1908 def _computeellipsis(repo, common, heads, known, match, depth=None):
1909 1909 """Compute the shape of a narrowed DAG.
1910 1910
1911 1911 Args:
1912 1912 repo: The repository we're transferring.
1913 1913 common: The roots of the DAG range we're transferring.
1914 1914 May be just [nullid], which means all ancestors of heads.
1915 1915 heads: The heads of the DAG range we're transferring.
1916 1916 match: The narrowmatcher that allows us to identify relevant changes.
1917 1917 depth: If not None, only consider nodes to be full nodes if they are at
1918 1918 most depth changesets away from one of heads.
1919 1919
1920 1920 Returns:
1921 1921 A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:
1922 1922
1923 1923 visitnodes: The list of nodes (either full or ellipsis) which
1924 1924 need to be sent to the client.
1925 1925 relevant_nodes: The set of changelog nodes which change a file inside
1926 1926 the narrowspec. The client needs these as non-ellipsis nodes.
1927 1927 ellipsisroots: A dict of {rev: parents} that is used in
1928 1928 narrowchangegroup to produce ellipsis nodes with the
1929 1929 correct parents.
1930 1930 """
1931 1931 cl = repo.changelog
1932 1932 mfl = repo.manifestlog
1933 1933
1934 1934 clrev = cl.rev
1935 1935
1936 1936 commonrevs = {clrev(n) for n in common} | {nullrev}
1937 1937 headsrevs = {clrev(n) for n in heads}
1938 1938
1939 1939 if depth:
1940 1940 revdepth = {h: 0 for h in headsrevs}
1941 1941
1942 1942 ellipsisheads = collections.defaultdict(set)
1943 1943 ellipsisroots = collections.defaultdict(set)
1944 1944
1945 1945 def addroot(head, curchange):
1946 1946 """Add a root to an ellipsis head, splitting heads with 3 roots."""
1947 1947 ellipsisroots[head].add(curchange)
1948 1948 # Recursively split ellipsis heads with 3 roots by finding the
1949 1949 # roots' youngest common descendant which is an elided merge commit.
1950 1950 # That descendant takes 2 of the 3 roots as its own, and becomes a
1951 1951 # root of the head.
1952 1952 while len(ellipsisroots[head]) > 2:
1953 1953 child, roots = splithead(head)
1954 1954 splitroots(head, child, roots)
1955 1955 head = child # Recurse in case we just added a 3rd root
1956 1956
1957 1957 def splitroots(head, child, roots):
1958 1958 ellipsisroots[head].difference_update(roots)
1959 1959 ellipsisroots[head].add(child)
1960 1960 ellipsisroots[child].update(roots)
1961 1961 ellipsisroots[child].discard(child)
1962 1962
1963 1963 def splithead(head):
1964 1964 r1, r2, r3 = sorted(ellipsisroots[head])
1965 1965 for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
1966 1966 mid = repo.revs('sort(merge() & %d::%d & %d::%d, -rev)',
1967 1967 nr1, head, nr2, head)
1968 1968 for j in mid:
1969 1969 if j == nr2:
1970 1970 return nr2, (nr1, nr2)
1971 1971 if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
1972 1972 return j, (nr1, nr2)
1973 1973 raise error.Abort(_('Failed to split up ellipsis node! head: %d, '
1974 1974 'roots: %d %d %d') % (head, r1, r2, r3))
1975 1975
1976 1976 missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
1977 1977 visit = reversed(missing)
1978 1978 relevant_nodes = set()
1979 1979 visitnodes = [cl.node(m) for m in missing]
1980 1980 required = set(headsrevs) | known
1981 1981 for rev in visit:
1982 1982 clrev = cl.changelogrevision(rev)
1983 1983 ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
1984 1984 if depth is not None:
1985 1985 curdepth = revdepth[rev]
1986 1986 for p in ps:
1987 1987 revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
1988 1988 needed = False
1989 1989 shallow_enough = depth is None or revdepth[rev] <= depth
1990 1990 if shallow_enough:
1991 1991 curmf = mfl[clrev.manifest].read()
1992 1992 if ps:
1993 1993 # We choose to not trust the changed files list in
1994 1994 # changesets because it's not always correct. TODO: could
1995 1995 # we trust it for the non-merge case?
1996 1996 p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
1997 1997 needed = bool(curmf.diff(p1mf, match))
1998 1998 if not needed and len(ps) > 1:
1999 1999 # For merge changes, the list of changed files is not
2000 2000 # helpful, since we need to emit the merge if a file
2001 2001 # in the narrow spec has changed on either side of the
2002 2002 # merge. As a result, we do a manifest diff to check.
2003 2003 p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
2004 2004 needed = bool(curmf.diff(p2mf, match))
2005 2005 else:
2006 2006 # For a root node, we need to include the node if any
2007 2007 # files in the node match the narrowspec.
2008 2008 needed = any(curmf.walk(match))
2009 2009
2010 2010 if needed:
2011 2011 for head in ellipsisheads[rev]:
2012 2012 addroot(head, rev)
2013 2013 for p in ps:
2014 2014 required.add(p)
2015 2015 relevant_nodes.add(cl.node(rev))
2016 2016 else:
2017 2017 if not ps:
2018 2018 ps = [nullrev]
2019 2019 if rev in required:
2020 2020 for head in ellipsisheads[rev]:
2021 2021 addroot(head, rev)
2022 2022 for p in ps:
2023 2023 ellipsisheads[p].add(rev)
2024 2024 else:
2025 2025 for p in ps:
2026 2026 ellipsisheads[p] |= ellipsisheads[rev]
2027 2027
2028 2028 # add common changesets as roots of their reachable ellipsis heads
2029 2029 for c in commonrevs:
2030 2030 for head in ellipsisheads[c]:
2031 2031 addroot(head, c)
2032 2032 return visitnodes, relevant_nodes, ellipsisroots
2033 2033
def caps20to10(repo, role):
    """Return the capability set advertising bundle2 for a getbundle call.

    The set always contains 'HG20' plus a 'bundle2=' entry carrying the
    repository's URL-quoted, encoded bundle2 capabilities for ``role``.
    """
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
    return {'HG20', 'bundle2=' + urlreq.quote(capsblob)}
2040 2040
# List of names of steps to perform for a bundle2 for getbundle, order matters.
# Populated by the getbundle2partsgenerator decorator.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}
2048 2048
def getbundle2partsgenerator(stepname, idx=None):
    """Decorator registering a bundle2 part generator for getbundle.

    The decorated function is recorded in the step -> function mapping and
    its name inserted into the ordered step list: appended when ``idx`` is
    None, otherwise inserted at position ``idx``. Beware that decorated
    functions are registered in declaration order (this may matter).

    Only use this decorator for new steps; to wrap a step from an
    extension, modify the getbundle2partsmapping dictionary directly.
    """
    def register(func):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            getbundle2partsorder.insert(idx, stepname)
        return func
    return register
2067 2067
def bundle2requested(bundlecaps):
    """Return whether the client capabilities request a bundle2 stream.

    A bundle2 stream is requested when any advertised capability starts
    with 'HG2'. ``bundlecaps`` of None means no capabilities were sent.
    """
    if bundlecaps is None:
        return False
    for cap in bundlecaps:
        if cap.startswith('HG2'):
            return True
    return False
2072 2072
def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
                    **kwargs):
    """Return chunks constituting a bundle's raw data.

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed.

    Returns a 2-tuple of a dict with metadata about the generated bundle
    and an iterator over raw chunks (of varying sizes).
    """
    kwargs = pycompat.byteskwargs(kwargs)
    info = {}
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case: bundle1 only carries a changegroup, so any other
    # getbundle argument is unsupported.
    if not usebundle2:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        outgoing = _computeoutgoing(repo, heads, common)
        info['bundleversion'] = 1
        return info, changegroup.makestream(repo, outgoing, '01', source,
                                            bundlecaps=bundlecaps)

    # bundle20 case
    info['bundleversion'] = 2
    # Decode the client's bundle2 capabilities from the URL-quoted
    # 'bundle2=' entry of bundlecaps.
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urlreq.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    # Invoke every registered part generator in registration order; each
    # decides for itself whether to add a part to the bundle.
    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **pycompat.strkwargs(kwargs))

    info['prefercompressed'] = bundler.prefercompressed

    return info, bundler.getchunks()
2119 2119
@getbundle2partsgenerator('stream2')
def _getbundlestream2(bundler, repo, *args, **kwargs):
    # Thin wrapper: bundle2.addpartbundlestream2() decides whether a
    # 'stream2' part is actually emitted for this request.
    return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
2123 2123
@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    # The client can opt out of changegroup data entirely.
    if not kwargs.get(r'cg', True):
        return

    # '01' is the fallback when the client advertises no usable
    # changegroup versions.
    version = '01'
    cgversions = b2caps.get('changegroup')
    if cgversions: # 3.1 and 3.2 ship with an empty value
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(repo)]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)

    outgoing = _computeoutgoing(repo, heads, common)
    if not outgoing.missing:
        return

    if kwargs.get(r'narrow', False):
        # Drop empty patterns before building the narrow matcher.
        include = sorted(filter(bool, kwargs.get(r'includepats', [])))
        exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
        filematcher = narrowspec.match(repo.root, include=include,
                                       exclude=exclude)
    else:
        filematcher = None

    cgstream = changegroup.makestream(repo, outgoing, version, source,
                                      bundlecaps=bundlecaps,
                                      filematcher=filematcher)

    part = bundler.newpart('changegroup', data=cgstream)
    if cgversions:
        part.addparam('version', version)

    part.addparam('nbchanges', '%d' % len(outgoing.missing),
                  mandatory=False)

    if 'treemanifest' in repo.requirements:
        part.addparam('treemanifest', '1')

    # NOTE: 'include'/'exclude' are only bound in the narrow branch above;
    # the short-circuit on kwargs.get(r'narrow') keeps this safe.
    if kwargs.get(r'narrow', False) and (include or exclude):
        narrowspecpart = bundler.newpart('narrow:spec')
        if include:
            narrowspecpart.addparam(
                'include', '\n'.join(include), mandatory=True)
        if exclude:
            narrowspecpart.addparam(
                'exclude', '\n'.join(exclude), mandatory=True)
2174 2174
@getbundle2partsgenerator('bookmarks')
def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
                           b2caps=None, **kwargs):
    """add a bookmark part to the requested bundle"""
    # Skip unless the client both asked for bookmarks and advertises the
    # binary bookmarks exchange capability.
    if not kwargs.get(r'bookmarks', False):
        return
    if 'bookmarks' not in b2caps:
        raise ValueError(_('no common bookmarks exchange method'))
    data = bookmod.binaryencode(bookmod.listbinbookmarks(repo))
    if data:
        bundler.newpart('bookmarks', data=data)
2187 2187
@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    # One 'listkeys' part per requested namespace.
    for namespace in kwargs.get(r'listkeys', ()):
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        part.data = pushkey.encodekeys(repo.listkeys(namespace).items())
2198 2198
@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if not kwargs.get(r'obsmarkers', False):
        return
    if heads is None:
        heads = repo.heads()
    # Only markers relevant to the ancestors of the requested heads are sent.
    subset = [ctx.node() for ctx in repo.set('::%ln', heads)]
    markers = sorted(repo.obsstore.relevantmarkers(subset))
    bundle2.buildobsmarkerspart(bundler, markers)
2210 2210
@getbundle2partsgenerator('phases')
def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, **kwargs):
    """add phase heads part to the requested bundle"""
    if kwargs.get(r'phases', False):
        # Use a default so a client that advertises no 'phases' capability
        # at all raises the intended ValueError rather than a TypeError
        # from testing membership on None.
        if 'heads' not in b2caps.get('phases', ()):
            raise ValueError(_('no common phases exchange method'))
        if heads is None:
            heads = repo.heads()

        headsbyphase = collections.defaultdict(set)
        if repo.publishing():
            # On a publishing repository every head is public.
            headsbyphase[phases.public] = heads
        else:
            # find the appropriate heads to move

            phase = repo._phasecache.phase
            node = repo.changelog.node
            rev = repo.changelog.rev
            for h in heads:
                headsbyphase[phase(repo, rev(h))].add(h)
            seenphases = list(headsbyphase.keys())

            # We do not handle anything but public and draft phase for now.
            if seenphases:
                assert max(seenphases) <= phases.draft

            # if client is pulling non-public changesets, we need to find
            # intermediate public heads.
            draftheads = headsbyphase.get(phases.draft, set())
            if draftheads:
                publicheads = headsbyphase.get(phases.public, set())

                revset = 'heads(only(%ln, %ln) and public())'
                extraheads = repo.revs(revset, draftheads, publicheads)
                for r in extraheads:
                    headsbyphase[phases.public].add(node(r))

        # transform data in a format used by the encoding function
        phasemapping = []
        for phase in phases.allphases:
            phasemapping.append(sorted(headsbyphase[phase]))

        # generate the actual part
        phasedata = phases.binaryencode(phasemapping)
        bundler.newpart('phase-heads', data=phasedata)
2257 2257
@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Send only when changesets are being exchanged and the client
    # advertises support for the part.
    if not kwargs.get(r'cg', True):
        return
    if 'hgtagsfnodes' not in b2caps:
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addparttagsfnodescache(repo, bundler, outgoing)
2277 2277
@getbundle2partsgenerator('cache:rev-branch-cache')
def _getbundlerevbranchcache(bundler, repo, source, bundlecaps=None,
                             b2caps=None, heads=None, common=None,
                             **kwargs):
    """Transfer the rev-branch-cache mapping

    The payload is a series of data related to each branch

    1) branch name length
    2) number of open heads
    3) number of closed heads
    4) open heads nodes
    5) closed heads nodes
    """
    # Don't send unless:
    # - changesets are being exchanged,
    # - the client supports it.
    # - narrow bundle isn't in play (not currently compatible).
    if (not kwargs.get(r'cg', True)
        or 'rev-branch-cache' not in b2caps
        or kwargs.get(r'narrow', False)
        or repo.ui.has_section(_NARROWACL_SECTION)):
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addpartrevbranchcache(repo, bundler, outgoing)
2304 2304
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    current = repo.heads()
    digest = hashlib.sha1(''.join(sorted(current))).digest()
    if (their_heads == ['force'] or their_heads == current or
            their_heads == ['hashed', digest]):
        return
    # someone else committed/pushed/unbundled while we
    # were transferring data
    raise error.PushRaced('repository changed while %s - '
                          'please try again' % context)
2318 2318
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    this function makes sure the repo is locked during the application and have
    mechanism to check that no push race occurred between the creation of the
    bundle and its application.

    If the push was raced a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        # note: outside bundle1, 'heads' is expected to be empty and this
        # 'check_heads' call will be a no-op
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if not isinstance(cg, bundle2.unbundle20):
            # legacy case: bundle1 (changegroup 01)
            txnname = "\n".join([source, util.hidepassword(url)])
            with repo.lock(), repo.transaction(txnname) as tr:
                op = bundle2.applybundle(repo, cg, tr, source, url)
                r = bundle2.combinechangegroupresults(op)
        else:
            r = None
            try:
                def gettransaction():
                    # Lazily acquire locks and open a transaction the first
                    # time any bundle2 part needs one.
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                             captureoutput=captureoutput,
                                             source='push')
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        # Buffer server output so it can be relayed to the
                        # client in an 'output' part of the reply bundle.
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    # Salvage already-generated reply parts so the error
                    # output still reaches the client.
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
    finally:
        # Release in reverse acquisition order: tr, lock, wlock.
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r
2392 2392
def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible.

    No-op unless clone bundles are enabled, the local repo is empty, no
    specific heads were requested, and the remote advertises the
    'clonebundles' capability.
    """

    repo = pullop.repo
    remote = pullop.remote

    if not repo.ui.configbool('ui', 'clonebundles'):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    # A pull restricted to specific heads can't use a pre-generated bundle.
    if pullop.heads:
        return

    if not remote.capable('clonebundles'):
        return

    with remote.commandexecutor() as e:
        res = e.callcommand('clonebundles', {}).result()

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(_('no clone bundles available on remote; '
                       'falling back to regular clone\n'))
        return

    entries = filterclonebundleentries(
        repo, entries, streamclonerequested=pullop.streamclonerequested)

    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(_('no compatible clone bundles available on server; '
                       'falling back to regular clone\n'))
        repo.ui.warn(_('(you may want to report this to the server '
                       'operator)\n'))
        return

    entries = sortclonebundleentries(repo.ui, entries)

    url = entries[0]['URL']
    repo.ui.status(_('applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_('finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool('ui', 'clonebundlefallback'):
        repo.ui.warn(_('falling back to normal clone\n'))
    else:
        raise error.Abort(_('error applying bundle'),
                          hint=_('if this error persists, consider contacting '
                                 'the server operator or disable clone '
                                 'bundles via '
                                 '"--config ui.clonebundles=false"'))
2459 2459
def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. The dicts have a ``URL`` key corresponding
    to the URL and other keys are the attributes for the entry.
    """
    m = []
    # Each non-empty line is '<URL> KEY=VALUE KEY=VALUE ...' with
    # URL-quoted keys and values.
    for line in s.splitlines():
        fields = line.split()
        if not fields:
            continue
        attrs = {'URL': fields[0]}
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            attrs[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if key == 'BUNDLESPEC':
                try:
                    bundlespec = parsebundlespec(repo, value)
                    attrs['COMPRESSION'] = bundlespec.compression
                    attrs['VERSION'] = bundlespec.version
                # Unparseable specs are tolerated: the raw BUNDLESPEC
                # attribute is kept, only the derived keys are skipped.
                except error.InvalidBundleSpecification:
                    pass
                except error.UnsupportedBundleSpecification:
                    pass

        m.append(attrs)

    return m
2494 2494
def isstreamclonespec(bundlespec):
    """Return True if ``bundlespec`` describes a stream clone bundle."""
    comp = bundlespec.wirecompression

    # Stream clone v1
    if comp == 'UN' and bundlespec.wireversion == 's1':
        return True

    # Stream clone v2
    if (comp == 'UN'
            and bundlespec.wireversion == '02'
            and bundlespec.contentopts.get('streamv2')):
        return True

    return False
2507 2507
def filterclonebundleentries(repo, entries, streamclonerequested=False):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    newentries = []
    for entry in entries:
        spec = entry.get('BUNDLESPEC')
        if spec:
            try:
                bundlespec = parsebundlespec(repo, spec, strict=True)

                # If a stream clone was requested, filter out non-streamclone
                # entries.
                if streamclonerequested and not isstreamclonespec(bundlespec):
                    repo.ui.debug('filtering %s because not a stream clone\n' %
                                  entry['URL'])
                    continue

            # A spec we can't parse or don't support means we can't apply
            # the bundle; skip the entry rather than failing the clone.
            except error.InvalidBundleSpecification as e:
                repo.ui.debug(stringutil.forcebytestr(e) + '\n')
                continue
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (
                                  entry['URL'], stringutil.forcebytestr(e)))
                continue
        # If we don't have a spec and requested a stream clone, we don't know
        # what the entry is so don't attempt to apply it.
        elif streamclonerequested:
            repo.ui.debug('filtering %s because cannot determine if a stream '
                          'clone bundle\n' % entry['URL'])
            continue

        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            continue

        newentries.append(entry)

    return newentries
2555 2555
class clonebundleentry(object):
    """Represents an item in a clone bundles manifest.

    This rich class is needed to support sorting since sorted() in Python 3
    doesn't support ``cmp`` and our comparison is complex enough that ``key=``
    won't work.
    """

    def __init__(self, value, prefers):
        self.value = value
        self.prefers = prefers

    def _cmp(self, other):
        # Walk the preference list in order; the first attribute that
        # distinguishes the two entries decides the ordering.
        for prefkey, wanted in self.prefers:
            ours = self.value.get(prefkey)
            theirs = other.value.get(prefkey)

            # An exact match on one side with the attribute absent on the
            # other side wins for the matching side.
            if ours is not None and theirs is None and ours == wanted:
                return -1
            if theirs is not None and ours is None and theirs == wanted:
                return 1

            # We can't compare unless the attribute is present on both.
            if ours is None or theirs is None:
                continue

            # Identical values defer to the next preference.
            if ours == theirs:
                continue

            # Exact matches come first.
            if ours == wanted:
                return -1
            if theirs == wanted:
                return 1

        # No preference distinguished the entries; fall back to input
        # (index) order.
        return 0

    def __lt__(self, other):
        return self._cmp(other) < 0

    def __gt__(self, other):
        return self._cmp(other) > 0

    def __eq__(self, other):
        return self._cmp(other) == 0

    def __le__(self, other):
        return self._cmp(other) <= 0

    def __ge__(self, other):
        return self._cmp(other) >= 0

    def __ne__(self, other):
        return self._cmp(other) != 0
2619 2619
def sortclonebundleentries(ui, entries):
    """Order manifest entries by the user's ``ui.clonebundleprefers`` config.

    Without any configured preferences the manifest order is preserved.
    """
    prefers = ui.configlist('ui', 'clonebundleprefers')
    if not prefers:
        return list(entries)

    parsed = [p.split('=', 1) for p in prefers]
    wrapped = sorted(clonebundleentry(e, parsed) for e in entries)
    return [w.value for w in wrapped]
2629 2629
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL.

    Returns True when the bundle was fetched and applied. HTTP and URL
    errors are reported as warnings and yield False; other exceptions
    propagate.
    """
    with repo.lock(), repo.transaction('bundleurl') as tr:
        try:
            fh = urlmod.open(ui, url)
            cg = readbundle(ui, fh, 'stream')

            if isinstance(cg, streamclone.streamcloneapplier):
                # Stream clone bundles are applied directly, not via
                # bundle2 processing.
                cg.apply(repo)
            else:
                bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
            return True
        except urlerr.httperror as e:
            ui.warn(_('HTTP error fetching bundle: %s\n') %
                    stringutil.forcebytestr(e))
        except urlerr.urlerror as e:
            ui.warn(_('error fetching bundle: %s\n') %
                    stringutil.forcebytestr(e.reason))

        return False
@@ -1,169 +1,253
1 1 **Experimental and under active development**
2 2
3 3 This section documents the wire protocol commands exposed to transports
4 4 using the frame-based protocol. The set of commands exposed through
5 5 these transports is distinct from the set of commands exposed to legacy
6 6 transports.
7 7
8 8 The frame-based protocol uses CBOR to encode command execution requests.
9 9 All command arguments must be mapped to a specific or set of CBOR data
10 10 types.
11 11
12 12 The response to many commands is also CBOR. There is no common response
13 13 format: each command defines its own response format.
14 14
15 15 TODOs
16 16 =====
17 17
18 18 * Add "node namespace" support to each command. In order to support
19 19 SHA-1 hash transition, we want servers to be able to expose different
20 20 "node namespaces" for the same data. Every command operating on nodes
21 21 should specify which "node namespace" it is operating on and responses
22 22 should encode the "node namespace" accordingly.
23 23
24 24 Commands
25 25 ========
26 26
27 27 The sections below detail all commands available to wire protocol version
28 28 2.
29 29
30 30 branchmap
31 31 ---------
32 32
33 33 Obtain heads in named branches.
34 34
35 35 Receives no arguments.
36 36
37 37 The response is a map with bytestring keys defining the branch name.
38 38 Values are arrays of bytestring defining raw changeset nodes.
39 39
40 40 capabilities
41 41 ------------
42 42
43 43 Obtain the server's capabilities.
44 44
45 45 Receives no arguments.
46 46
47 47 This command is typically called only as part of the handshake during
48 48 initial connection establishment.
49 49
50 50 The response is a map with bytestring keys defining server information.
51 51
52 52 The defined keys are:
53 53
54 54 commands
55 55 A map defining available wire protocol commands on this server.
56 56
57 57 Keys in the map are the names of commands that can be invoked. Values
58 58 are maps defining information about that command. The bytestring keys
59 59 are:
60 60
61 61 args
62 62 A map of argument names and their expected types.
63 63
64 64 Types are defined as a representative value for the expected type.
65 65 e.g. an argument expecting a boolean type will have its value
66 66 set to true. An integer type will have its value set to 42. The
67 67 actual values are arbitrary and may not have meaning.
68 68 permissions
69 69 An array of permissions required to execute this command.
70 70
71 71 compression
72 72 An array of maps defining available compression format support.
73 73
74 74 The array is sorted from most preferred to least preferred.
75 75
76 76 Each entry has the following bytestring keys:
77 77
78 78 name
79 79 Name of the compression engine. e.g. ``zstd`` or ``zlib``.
80 80
81 81 framingmediatypes
82 82 An array of bytestrings defining the supported framing protocol
83 83 media types. Servers will not accept media types not in this list.
84 84
85 85 rawrepoformats
86 86 An array of storage formats the repository is using. This set of
87 87 requirements can be used to determine whether a client can read a
88 88 *raw* copy of file data available.
89 89
90 changesetdata
91 -------------
92
93 Obtain various data related to changesets.
94
95 The command accepts the following arguments:
96
97 noderange
98 (array of arrays of bytestrings) An array of 2 elements, each being an
99 array of node bytestrings. The first array denotes the changelog revisions
100 that are already known to the client. The second array denotes the changelog
101 revision DAG heads to fetch. The argument essentially defines a DAG range
102 bounded by root and head nodes to fetch.
103
104 The roots array may be empty. The heads array must be defined.
105
106 nodes
107 (array of bytestrings) Changelog revisions to request explicitly.
108
109 fields
110 (set of bytestring) Which data associated with changelog revisions to
111 fetch. The following values are recognized:
112
113 parents
114 Parent revisions.
115
116 revision
117 The raw, revision data for the changelog entry. The hash of this data
118 will match the revision's node value.
119
120 The server resolves the set of revisions relevant to the request by taking
121 the union of the ``noderange`` and ``nodes`` arguments. At least one of these
122 arguments must be defined.
123
124 The response bytestream starts with a CBOR map describing the data that follows.
125 This map has the following bytestring keys:
126
127 totalitems
128 (unsigned integer) Total number of changelog revisions whose data is being
129 transferred.
130
131 Following the map header is a series of 0 or more CBOR values. If values
132 are present, the first value will always be a map describing a single changeset
133 revision. If revision data is requested, the raw revision data (encoded as
134 a CBOR bytestring) will follow the map describing it. Otherwise, another CBOR
135 map describing the next changeset revision will occur.
136
137 Each map has the following bytestring keys:
138
139 node
140 (bytestring) The node value for this revision. This is the SHA-1 hash of
141 the raw revision data.
142
143 parents (optional)
144 (array of bytestrings) The nodes representing the parent revisions of this
145 revision. Only present if ``parents`` data is being requested.
146
147 revisionsize (optional)
148 (unsigned integer) Indicates the size of raw revision data that follows this
149 map. The following data contains a serialized form of the changeset data,
150 including the author, date, commit message, set of changed files, manifest
151 node, and other metadata.
152
153 Only present if ``revision`` data was requested and the data follows this
154 map.
155
156 If nodes are requested via ``noderange``, they will be emitted in DAG order,
157 parents always before children.
158
159 If nodes are requested via ``nodes``, they will be emitted in requested order.
160
161 Nodes from ``nodes`` are emitted before nodes from ``noderange``.
162
163 TODO support different revision selection mechanisms (e.g. non-public, specific
164 revisions)
165 TODO support different hash "namespaces" for revisions (e.g. sha-1 versus other)
166 TODO support emitting phases data
167 TODO support emitting bookmarks data
168 TODO support emitting obsolescence data
169 TODO support filtering based on relevant paths (narrow clone)
170 TODO support depth limiting
171 TODO support hgtagsfnodes cache / tags data
172 TODO support branch heads cache
173
90 174 heads
91 175 -----
92 176
93 177 Obtain DAG heads in the repository.
94 178
95 179 The command accepts the following arguments:
96 180
97 181 publiconly (optional)
98 182 (boolean) If set, operate on the DAG for public phase changesets only.
99 183 Non-public (i.e. draft) phase DAG heads will not be returned.
100 184
101 185 The response is a CBOR array of bytestrings defining changeset nodes
102 186 of DAG heads. The array can be empty if the repository is empty or no
103 187 changesets satisfied the request.
104 188
105 189 TODO consider exposing phase of heads in response
106 190
107 191 known
108 192 -----
109 193
110 194 Determine whether a series of changeset nodes is known to the server.
111 195
112 196 The command accepts the following arguments:
113 197
114 198 nodes
115 199 (array of bytestrings) List of changeset nodes whose presence to
116 200 query.
117 201
118 202 The response is a bytestring where each byte contains a 0 or 1 for the
119 203 corresponding requested node at the same index.
120 204
121 205 TODO use a bit array for even more compact response
122 206
123 207 listkeys
124 208 --------
125 209
126 210 List values in a specified ``pushkey`` namespace.
127 211
128 212 The command receives the following arguments:
129 213
130 214 namespace
131 215 (bytestring) Pushkey namespace to query.
132 216
133 217 The response is a map with bytestring keys and values.
134 218
135 219 TODO consider using binary to represent nodes in certain pushkey namespaces.
136 220
137 221 lookup
138 222 ------
139 223
140 224 Try to resolve a value to a changeset revision.
141 225
142 226 Unlike ``known`` which operates on changeset nodes, lookup operates on
143 227 node fragments and other names that a user may use.
144 228
145 229 The command receives the following arguments:
146 230
147 231 key
148 232 (bytestring) Value to try to resolve.
149 233
150 234 On success, returns a bytestring containing the resolved node.
151 235
152 236 pushkey
153 237 -------
154 238
155 239 Set a value using the ``pushkey`` protocol.
156 240
157 241 The command receives the following arguments:
158 242
159 243 namespace
160 244 (bytestring) Pushkey namespace to operate on.
161 245 key
162 246 (bytestring) The pushkey key to set.
163 247 old
164 248 (bytestring) Old value for this key.
165 249 new
166 250 (bytestring) New value for this key.
167 251
168 252 TODO consider using binary to represent nodes is certain pushkey namespaces.
169 253 TODO better define response type and meaning.
@@ -1,1007 +1,1006
1 1 # httppeer.py - HTTP repository proxy classes for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import io
13 13 import os
14 14 import socket
15 15 import struct
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from . import (
20 20 bundle2,
21 21 error,
22 22 httpconnection,
23 23 pycompat,
24 24 repository,
25 25 statichttprepo,
26 26 url as urlmod,
27 27 util,
28 28 wireprotoframing,
29 29 wireprototypes,
30 30 wireprotov1peer,
31 31 wireprotov2peer,
32 32 wireprotov2server,
33 33 )
34 34 from .utils import (
35 35 cborutil,
36 36 interfaceutil,
37 37 stringutil,
38 38 )
39 39
40 40 httplib = util.httplib
41 41 urlerr = util.urlerr
42 42 urlreq = util.urlreq
43 43
44 44 def encodevalueinheaders(value, header, limit):
45 45 """Encode a string value into multiple HTTP headers.
46 46
47 47 ``value`` will be encoded into 1 or more HTTP headers with the names
48 48 ``header-<N>`` where ``<N>`` is an integer starting at 1. Each header
49 49 name + value will be at most ``limit`` bytes long.
50 50
51 51 Returns an iterable of 2-tuples consisting of header names and
52 52 values as native strings.
53 53 """
54 54 # HTTP Headers are ASCII. Python 3 requires them to be unicodes,
55 55 # not bytes. This function always takes bytes in as arguments.
56 56 fmt = pycompat.strurl(header) + r'-%s'
57 57 # Note: it is *NOT* a bug that the last bit here is a bytestring
58 58 # and not a unicode: we're just getting the encoded length anyway,
59 59 # and using an r-string to make it portable between Python 2 and 3
60 60 # doesn't work because then the \r is a literal backslash-r
61 61 # instead of a carriage return.
62 62 valuelen = limit - len(fmt % r'000') - len(': \r\n')
63 63 result = []
64 64
65 65 n = 0
66 66 for i in pycompat.xrange(0, len(value), valuelen):
67 67 n += 1
68 68 result.append((fmt % str(n), pycompat.strurl(value[i:i + valuelen])))
69 69
70 70 return result
71 71
72 72 def _wraphttpresponse(resp):
73 73 """Wrap an HTTPResponse with common error handlers.
74 74
75 75 This ensures that any I/O from any consumer raises the appropriate
76 76 error and messaging.
77 77 """
78 78 origread = resp.read
79 79
80 80 class readerproxy(resp.__class__):
81 81 def read(self, size=None):
82 82 try:
83 83 return origread(size)
84 84 except httplib.IncompleteRead as e:
85 85 # e.expected is an integer if length known or None otherwise.
86 86 if e.expected:
87 87 got = len(e.partial)
88 88 total = e.expected + got
89 89 msg = _('HTTP request error (incomplete response; '
90 90 'expected %d bytes got %d)') % (total, got)
91 91 else:
92 92 msg = _('HTTP request error (incomplete response)')
93 93
94 94 raise error.PeerTransportError(
95 95 msg,
96 96 hint=_('this may be an intermittent network failure; '
97 97 'if the error persists, consider contacting the '
98 98 'network or server operator'))
99 99 except httplib.HTTPException as e:
100 100 raise error.PeerTransportError(
101 101 _('HTTP request error (%s)') % e,
102 102 hint=_('this may be an intermittent network failure; '
103 103 'if the error persists, consider contacting the '
104 104 'network or server operator'))
105 105
106 106 resp.__class__ = readerproxy
107 107
108 108 class _multifile(object):
109 109 def __init__(self, *fileobjs):
110 110 for f in fileobjs:
111 111 if not util.safehasattr(f, 'length'):
112 112 raise ValueError(
113 113 '_multifile only supports file objects that '
114 114 'have a length but this one does not:', type(f), f)
115 115 self._fileobjs = fileobjs
116 116 self._index = 0
117 117
118 118 @property
119 119 def length(self):
120 120 return sum(f.length for f in self._fileobjs)
121 121
122 122 def read(self, amt=None):
123 123 if amt <= 0:
124 124 return ''.join(f.read() for f in self._fileobjs)
125 125 parts = []
126 126 while amt and self._index < len(self._fileobjs):
127 127 parts.append(self._fileobjs[self._index].read(amt))
128 128 got = len(parts[-1])
129 129 if got < amt:
130 130 self._index += 1
131 131 amt -= got
132 132 return ''.join(parts)
133 133
134 134 def seek(self, offset, whence=os.SEEK_SET):
135 135 if whence != os.SEEK_SET:
136 136 raise NotImplementedError(
137 137 '_multifile does not support anything other'
138 138 ' than os.SEEK_SET for whence on seek()')
139 139 if offset != 0:
140 140 raise NotImplementedError(
141 141 '_multifile only supports seeking to start, but that '
142 142 'could be fixed if you need it')
143 143 for f in self._fileobjs:
144 144 f.seek(0)
145 145 self._index = 0
146 146
147 147 def makev1commandrequest(ui, requestbuilder, caps, capablefn,
148 148 repobaseurl, cmd, args):
149 149 """Make an HTTP request to run a command for a version 1 client.
150 150
151 151 ``caps`` is a set of known server capabilities. The value may be
152 152 None if capabilities are not yet known.
153 153
154 154 ``capablefn`` is a function to evaluate a capability.
155 155
156 156 ``cmd``, ``args``, and ``data`` define the command, its arguments, and
157 157 raw data to pass to it.
158 158 """
159 159 if cmd == 'pushkey':
160 160 args['data'] = ''
161 161 data = args.pop('data', None)
162 162 headers = args.pop('headers', {})
163 163
164 164 ui.debug("sending %s command\n" % cmd)
165 165 q = [('cmd', cmd)]
166 166 headersize = 0
167 167 # Important: don't use self.capable() here or else you end up
168 168 # with infinite recursion when trying to look up capabilities
169 169 # for the first time.
170 170 postargsok = caps is not None and 'httppostargs' in caps
171 171
172 172 # Send arguments via POST.
173 173 if postargsok and args:
174 174 strargs = urlreq.urlencode(sorted(args.items()))
175 175 if not data:
176 176 data = strargs
177 177 else:
178 178 if isinstance(data, bytes):
179 179 i = io.BytesIO(data)
180 180 i.length = len(data)
181 181 data = i
182 182 argsio = io.BytesIO(strargs)
183 183 argsio.length = len(strargs)
184 184 data = _multifile(argsio, data)
185 185 headers[r'X-HgArgs-Post'] = len(strargs)
186 186 elif args:
187 187 # Calling self.capable() can infinite loop if we are calling
188 188 # "capabilities". But that command should never accept wire
189 189 # protocol arguments. So this should never happen.
190 190 assert cmd != 'capabilities'
191 191 httpheader = capablefn('httpheader')
192 192 if httpheader:
193 193 headersize = int(httpheader.split(',', 1)[0])
194 194
195 195 # Send arguments via HTTP headers.
196 196 if headersize > 0:
197 197 # The headers can typically carry more data than the URL.
198 198 encargs = urlreq.urlencode(sorted(args.items()))
199 199 for header, value in encodevalueinheaders(encargs, 'X-HgArg',
200 200 headersize):
201 201 headers[header] = value
202 202 # Send arguments via query string (Mercurial <1.9).
203 203 else:
204 204 q += sorted(args.items())
205 205
206 206 qs = '?%s' % urlreq.urlencode(q)
207 207 cu = "%s%s" % (repobaseurl, qs)
208 208 size = 0
209 209 if util.safehasattr(data, 'length'):
210 210 size = data.length
211 211 elif data is not None:
212 212 size = len(data)
213 213 if data is not None and r'Content-Type' not in headers:
214 214 headers[r'Content-Type'] = r'application/mercurial-0.1'
215 215
216 216 # Tell the server we accept application/mercurial-0.2 and multiple
217 217 # compression formats if the server is capable of emitting those
218 218 # payloads.
219 219 # Note: Keep this set empty by default, as client advertisement of
220 220 # protocol parameters should only occur after the handshake.
221 221 protoparams = set()
222 222
223 223 mediatypes = set()
224 224 if caps is not None:
225 225 mt = capablefn('httpmediatype')
226 226 if mt:
227 227 protoparams.add('0.1')
228 228 mediatypes = set(mt.split(','))
229 229
230 230 protoparams.add('partial-pull')
231 231
232 232 if '0.2tx' in mediatypes:
233 233 protoparams.add('0.2')
234 234
235 235 if '0.2tx' in mediatypes and capablefn('compression'):
236 236 # We /could/ compare supported compression formats and prune
237 237 # non-mutually supported or error if nothing is mutually supported.
238 238 # For now, send the full list to the server and have it error.
239 239 comps = [e.wireprotosupport().name for e in
240 240 util.compengines.supportedwireengines(util.CLIENTROLE)]
241 241 protoparams.add('comp=%s' % ','.join(comps))
242 242
243 243 if protoparams:
244 244 protoheaders = encodevalueinheaders(' '.join(sorted(protoparams)),
245 245 'X-HgProto',
246 246 headersize or 1024)
247 247 for header, value in protoheaders:
248 248 headers[header] = value
249 249
250 250 varyheaders = []
251 251 for header in headers:
252 252 if header.lower().startswith(r'x-hg'):
253 253 varyheaders.append(header)
254 254
255 255 if varyheaders:
256 256 headers[r'Vary'] = r','.join(sorted(varyheaders))
257 257
258 258 req = requestbuilder(pycompat.strurl(cu), data, headers)
259 259
260 260 if data is not None:
261 261 ui.debug("sending %d bytes\n" % size)
262 262 req.add_unredirected_header(r'Content-Length', r'%d' % size)
263 263
264 264 return req, cu, qs
265 265
266 266 def _reqdata(req):
267 267 """Get request data, if any. If no data, returns None."""
268 268 if pycompat.ispy3:
269 269 return req.data
270 270 if not req.has_data():
271 271 return None
272 272 return req.get_data()
273 273
274 274 def sendrequest(ui, opener, req):
275 275 """Send a prepared HTTP request.
276 276
277 277 Returns the response object.
278 278 """
279 279 dbg = ui.debug
280 280 if (ui.debugflag
281 281 and ui.configbool('devel', 'debug.peer-request')):
282 282 line = 'devel-peer-request: %s\n'
283 283 dbg(line % '%s %s' % (pycompat.bytesurl(req.get_method()),
284 284 pycompat.bytesurl(req.get_full_url())))
285 285 hgargssize = None
286 286
287 287 for header, value in sorted(req.header_items()):
288 288 header = pycompat.bytesurl(header)
289 289 value = pycompat.bytesurl(value)
290 290 if header.startswith('X-hgarg-'):
291 291 if hgargssize is None:
292 292 hgargssize = 0
293 293 hgargssize += len(value)
294 294 else:
295 295 dbg(line % ' %s %s' % (header, value))
296 296
297 297 if hgargssize is not None:
298 298 dbg(line % ' %d bytes of commands arguments in headers'
299 299 % hgargssize)
300 300 data = _reqdata(req)
301 301 if data is not None:
302 302 length = getattr(data, 'length', None)
303 303 if length is None:
304 304 length = len(data)
305 305 dbg(line % ' %d bytes of data' % length)
306 306
307 307 start = util.timer()
308 308
309 309 res = None
310 310 try:
311 311 res = opener.open(req)
312 312 except urlerr.httperror as inst:
313 313 if inst.code == 401:
314 314 raise error.Abort(_('authorization failed'))
315 315 raise
316 316 except httplib.HTTPException as inst:
317 317 ui.debug('http error requesting %s\n' %
318 318 util.hidepassword(req.get_full_url()))
319 319 ui.traceback()
320 320 raise IOError(None, inst)
321 321 finally:
322 322 if ui.debugflag and ui.configbool('devel', 'debug.peer-request'):
323 323 code = res.code if res else -1
324 324 dbg(line % ' finished in %.4f seconds (%d)'
325 325 % (util.timer() - start, code))
326 326
327 327 # Insert error handlers for common I/O failures.
328 328 _wraphttpresponse(res)
329 329
330 330 return res
331 331
332 332 class RedirectedRepoError(error.RepoError):
333 333 def __init__(self, msg, respurl):
334 334 super(RedirectedRepoError, self).__init__(msg)
335 335 self.respurl = respurl
336 336
337 337 def parsev1commandresponse(ui, baseurl, requrl, qs, resp, compressible,
338 338 allowcbor=False):
339 339 # record the url we got redirected to
340 340 redirected = False
341 341 respurl = pycompat.bytesurl(resp.geturl())
342 342 if respurl.endswith(qs):
343 343 respurl = respurl[:-len(qs)]
344 344 qsdropped = False
345 345 else:
346 346 qsdropped = True
347 347
348 348 if baseurl.rstrip('/') != respurl.rstrip('/'):
349 349 redirected = True
350 350 if not ui.quiet:
351 351 ui.warn(_('real URL is %s\n') % respurl)
352 352
353 353 try:
354 354 proto = pycompat.bytesurl(resp.getheader(r'content-type', r''))
355 355 except AttributeError:
356 356 proto = pycompat.bytesurl(resp.headers.get(r'content-type', r''))
357 357
358 358 safeurl = util.hidepassword(baseurl)
359 359 if proto.startswith('application/hg-error'):
360 360 raise error.OutOfBandError(resp.read())
361 361
362 362 # Pre 1.0 versions of Mercurial used text/plain and
363 363 # application/hg-changegroup. We don't support such old servers.
364 364 if not proto.startswith('application/mercurial-'):
365 365 ui.debug("requested URL: '%s'\n" % util.hidepassword(requrl))
366 366 msg = _("'%s' does not appear to be an hg repository:\n"
367 367 "---%%<--- (%s)\n%s\n---%%<---\n") % (
368 368 safeurl, proto or 'no content-type', resp.read(1024))
369 369
370 370 # Some servers may strip the query string from the redirect. We
371 371 # raise a special error type so callers can react to this specially.
372 372 if redirected and qsdropped:
373 373 raise RedirectedRepoError(msg, respurl)
374 374 else:
375 375 raise error.RepoError(msg)
376 376
377 377 try:
378 378 subtype = proto.split('-', 1)[1]
379 379
380 380 # Unless we end up supporting CBOR in the legacy wire protocol,
381 381 # this should ONLY be encountered for the initial capabilities
382 382 # request during handshake.
383 383 if subtype == 'cbor':
384 384 if allowcbor:
385 385 return respurl, proto, resp
386 386 else:
387 387 raise error.RepoError(_('unexpected CBOR response from '
388 388 'server'))
389 389
390 390 version_info = tuple([int(n) for n in subtype.split('.')])
391 391 except ValueError:
392 392 raise error.RepoError(_("'%s' sent a broken Content-Type "
393 393 "header (%s)") % (safeurl, proto))
394 394
395 395 # TODO consider switching to a decompression reader that uses
396 396 # generators.
397 397 if version_info == (0, 1):
398 398 if compressible:
399 399 resp = util.compengines['zlib'].decompressorreader(resp)
400 400
401 401 elif version_info == (0, 2):
402 402 # application/mercurial-0.2 always identifies the compression
403 403 # engine in the payload header.
404 404 elen = struct.unpack('B', util.readexactly(resp, 1))[0]
405 405 ename = util.readexactly(resp, elen)
406 406 engine = util.compengines.forwiretype(ename)
407 407
408 408 resp = engine.decompressorreader(resp)
409 409 else:
410 410 raise error.RepoError(_("'%s' uses newer protocol %s") %
411 411 (safeurl, subtype))
412 412
413 413 return respurl, proto, resp
414 414
415 415 class httppeer(wireprotov1peer.wirepeer):
416 416 def __init__(self, ui, path, url, opener, requestbuilder, caps):
417 417 self.ui = ui
418 418 self._path = path
419 419 self._url = url
420 420 self._caps = caps
421 421 self._urlopener = opener
422 422 self._requestbuilder = requestbuilder
423 423
424 424 def __del__(self):
425 425 for h in self._urlopener.handlers:
426 426 h.close()
427 427 getattr(h, "close_all", lambda: None)()
428 428
429 429 # Begin of ipeerconnection interface.
430 430
431 431 def url(self):
432 432 return self._path
433 433
434 434 def local(self):
435 435 return None
436 436
437 437 def peer(self):
438 438 return self
439 439
440 440 def canpush(self):
441 441 return True
442 442
443 443 def close(self):
444 444 pass
445 445
446 446 # End of ipeerconnection interface.
447 447
448 448 # Begin of ipeercommands interface.
449 449
450 450 def capabilities(self):
451 451 return self._caps
452 452
453 453 # End of ipeercommands interface.
454 454
455 455 def _callstream(self, cmd, _compressible=False, **args):
456 456 args = pycompat.byteskwargs(args)
457 457
458 458 req, cu, qs = makev1commandrequest(self.ui, self._requestbuilder,
459 459 self._caps, self.capable,
460 460 self._url, cmd, args)
461 461
462 462 resp = sendrequest(self.ui, self._urlopener, req)
463 463
464 464 self._url, ct, resp = parsev1commandresponse(self.ui, self._url, cu, qs,
465 465 resp, _compressible)
466 466
467 467 return resp
468 468
469 469 def _call(self, cmd, **args):
470 470 fp = self._callstream(cmd, **args)
471 471 try:
472 472 return fp.read()
473 473 finally:
474 474 # if using keepalive, allow connection to be reused
475 475 fp.close()
476 476
477 477 def _callpush(self, cmd, cg, **args):
478 478 # have to stream bundle to a temp file because we do not have
479 479 # http 1.1 chunked transfer.
480 480
481 481 types = self.capable('unbundle')
482 482 try:
483 483 types = types.split(',')
484 484 except AttributeError:
485 485 # servers older than d1b16a746db6 will send 'unbundle' as a
486 486 # boolean capability. They only support headerless/uncompressed
487 487 # bundles.
488 488 types = [""]
489 489 for x in types:
490 490 if x in bundle2.bundletypes:
491 491 type = x
492 492 break
493 493
494 494 tempname = bundle2.writebundle(self.ui, cg, None, type)
495 495 fp = httpconnection.httpsendfile(self.ui, tempname, "rb")
496 496 headers = {r'Content-Type': r'application/mercurial-0.1'}
497 497
498 498 try:
499 499 r = self._call(cmd, data=fp, headers=headers, **args)
500 500 vals = r.split('\n', 1)
501 501 if len(vals) < 2:
502 502 raise error.ResponseError(_("unexpected response:"), r)
503 503 return vals
504 504 except urlerr.httperror:
505 505 # Catch and re-raise these so we don't try and treat them
506 506 # like generic socket errors. They lack any values in
507 507 # .args on Python 3 which breaks our socket.error block.
508 508 raise
509 509 except socket.error as err:
510 510 if err.args[0] in (errno.ECONNRESET, errno.EPIPE):
511 511 raise error.Abort(_('push failed: %s') % err.args[1])
512 512 raise error.Abort(err.args[1])
513 513 finally:
514 514 fp.close()
515 515 os.unlink(tempname)
516 516
517 517 def _calltwowaystream(self, cmd, fp, **args):
518 518 fh = None
519 519 fp_ = None
520 520 filename = None
521 521 try:
522 522 # dump bundle to disk
523 523 fd, filename = pycompat.mkstemp(prefix="hg-bundle-", suffix=".hg")
524 524 fh = os.fdopen(fd, r"wb")
525 525 d = fp.read(4096)
526 526 while d:
527 527 fh.write(d)
528 528 d = fp.read(4096)
529 529 fh.close()
530 530 # start http push
531 531 fp_ = httpconnection.httpsendfile(self.ui, filename, "rb")
532 532 headers = {r'Content-Type': r'application/mercurial-0.1'}
533 533 return self._callstream(cmd, data=fp_, headers=headers, **args)
534 534 finally:
535 535 if fp_ is not None:
536 536 fp_.close()
537 537 if fh is not None:
538 538 fh.close()
539 539 os.unlink(filename)
540 540
541 541 def _callcompressable(self, cmd, **args):
542 542 return self._callstream(cmd, _compressible=True, **args)
543 543
544 544 def _abort(self, exception):
545 545 raise exception
546 546
547 547 def sendv2request(ui, opener, requestbuilder, apiurl, permission, requests):
548 548 reactor = wireprotoframing.clientreactor(hasmultiplesend=False,
549 549 buffersends=True)
550 550
551 551 handler = wireprotov2peer.clienthandler(ui, reactor)
552 552
553 553 url = '%s/%s' % (apiurl, permission)
554 554
555 555 if len(requests) > 1:
556 556 url += '/multirequest'
557 557 else:
558 558 url += '/%s' % requests[0][0]
559 559
560 560 ui.debug('sending %d commands\n' % len(requests))
561 561 for command, args, f in requests:
562 562 ui.debug('sending command %s: %s\n' % (
563 563 command, stringutil.pprint(args, indent=2)))
564 564 assert not list(handler.callcommand(command, args, f))
565 565
566 566 # TODO stream this.
567 567 body = b''.join(map(bytes, handler.flushcommands()))
568 568
569 569 # TODO modify user-agent to reflect v2
570 570 headers = {
571 571 r'Accept': wireprotov2server.FRAMINGTYPE,
572 572 r'Content-Type': wireprotov2server.FRAMINGTYPE,
573 573 }
574 574
575 575 req = requestbuilder(pycompat.strurl(url), body, headers)
576 576 req.add_unredirected_header(r'Content-Length', r'%d' % len(body))
577 577
578 578 try:
579 579 res = opener.open(req)
580 580 except urlerr.httperror as e:
581 581 if e.code == 401:
582 582 raise error.Abort(_('authorization failed'))
583 583
584 584 raise
585 585 except httplib.HTTPException as e:
586 586 ui.traceback()
587 587 raise IOError(None, e)
588 588
589 589 return handler, res
590 590
591 591 class queuedcommandfuture(pycompat.futures.Future):
592 592 """Wraps result() on command futures to trigger submission on call."""
593 593
594 594 def result(self, timeout=None):
595 595 if self.done():
596 596 return pycompat.futures.Future.result(self, timeout)
597 597
598 598 self._peerexecutor.sendcommands()
599 599
600 600 # sendcommands() will restore the original __class__ and self.result
601 601 # will resolve to Future.result.
602 602 return self.result(timeout)
603 603
604 604 @interfaceutil.implementer(repository.ipeercommandexecutor)
605 605 class httpv2executor(object):
606 606 def __init__(self, ui, opener, requestbuilder, apiurl, descriptor):
607 607 self._ui = ui
608 608 self._opener = opener
609 609 self._requestbuilder = requestbuilder
610 610 self._apiurl = apiurl
611 611 self._descriptor = descriptor
612 612 self._sent = False
613 613 self._closed = False
614 614 self._neededpermissions = set()
615 615 self._calls = []
616 616 self._futures = weakref.WeakSet()
617 617 self._responseexecutor = None
618 618 self._responsef = None
619 619
620 620 def __enter__(self):
621 621 return self
622 622
623 623 def __exit__(self, exctype, excvalue, exctb):
624 624 self.close()
625 625
626 626 def callcommand(self, command, args):
627 627 if self._sent:
628 628 raise error.ProgrammingError('callcommand() cannot be used after '
629 629 'commands are sent')
630 630
631 631 if self._closed:
632 632 raise error.ProgrammingError('callcommand() cannot be used after '
633 633 'close()')
634 634
635 635 # The service advertises which commands are available. So if we attempt
636 636 # to call an unknown command or pass an unknown argument, we can screen
637 637 # for this.
638 638 if command not in self._descriptor['commands']:
639 639 raise error.ProgrammingError(
640 640 'wire protocol command %s is not available' % command)
641 641
642 642 cmdinfo = self._descriptor['commands'][command]
643 643 unknownargs = set(args.keys()) - set(cmdinfo.get('args', {}))
644 644
645 645 if unknownargs:
646 646 raise error.ProgrammingError(
647 647 'wire protocol command %s does not accept argument: %s' % (
648 648 command, ', '.join(sorted(unknownargs))))
649 649
650 650 self._neededpermissions |= set(cmdinfo['permissions'])
651 651
652 652 # TODO we /could/ also validate types here, since the API descriptor
653 653 # includes types...
654 654
655 655 f = pycompat.futures.Future()
656 656
657 657 # Monkeypatch it so result() triggers sendcommands(), otherwise result()
658 658 # could deadlock.
659 659 f.__class__ = queuedcommandfuture
660 660 f._peerexecutor = self
661 661
662 662 self._futures.add(f)
663 663 self._calls.append((command, args, f))
664 664
665 665 return f
666 666
667 667 def sendcommands(self):
668 668 if self._sent:
669 669 return
670 670
671 671 if not self._calls:
672 672 return
673 673
674 674 self._sent = True
675 675
676 676 # Unhack any future types so caller sees a clean type and so we
677 677 # break reference cycle.
678 678 for f in self._futures:
679 679 if isinstance(f, queuedcommandfuture):
680 680 f.__class__ = pycompat.futures.Future
681 681 f._peerexecutor = None
682 682
683 683 # Mark the future as running and filter out cancelled futures.
684 684 calls = [(command, args, f)
685 685 for command, args, f in self._calls
686 686 if f.set_running_or_notify_cancel()]
687 687
688 688 # Clear out references, prevent improper object usage.
689 689 self._calls = None
690 690
691 691 if not calls:
692 692 return
693 693
694 694 permissions = set(self._neededpermissions)
695 695
696 696 if 'push' in permissions and 'pull' in permissions:
697 697 permissions.remove('pull')
698 698
699 699 if len(permissions) > 1:
700 700 raise error.RepoError(_('cannot make request requiring multiple '
701 701 'permissions: %s') %
702 702 _(', ').join(sorted(permissions)))
703 703
704 704 permission = {
705 705 'push': 'rw',
706 706 'pull': 'ro',
707 707 }[permissions.pop()]
708 708
709 709 handler, resp = sendv2request(
710 710 self._ui, self._opener, self._requestbuilder, self._apiurl,
711 711 permission, calls)
712 712
713 713 # TODO we probably want to validate the HTTP code, media type, etc.
714 714
715 715 self._responseexecutor = pycompat.futures.ThreadPoolExecutor(1)
716 716 self._responsef = self._responseexecutor.submit(self._handleresponse,
717 717 handler, resp)
718 718
719 719 def close(self):
720 720 if self._closed:
721 721 return
722 722
723 723 self.sendcommands()
724 724
725 725 self._closed = True
726 726
727 727 if not self._responsef:
728 728 return
729 729
730 730 # TODO ^C here may not result in immediate program termination.
731 731
732 732 try:
733 733 self._responsef.result()
734 734 finally:
735 735 self._responseexecutor.shutdown(wait=True)
736 736 self._responsef = None
737 737 self._responseexecutor = None
738 738
739 739 # If any of our futures are still in progress, mark them as
740 740 # errored, otherwise a result() could wait indefinitely.
741 741 for f in self._futures:
742 742 if not f.done():
743 743 f.set_exception(error.ResponseError(
744 744 _('unfulfilled command response')))
745 745
746 746 self._futures = None
747 747
748 748 def _handleresponse(self, handler, resp):
749 749 # Called in a thread to read the response.
750 750
751 751 while handler.readframe(resp):
752 752 pass
753 753
754 754 # TODO implement interface for version 2 peers
755 755 @interfaceutil.implementer(repository.ipeerconnection,
756 756 repository.ipeercapabilities,
757 757 repository.ipeerrequests)
758 758 class httpv2peer(object):
759 759 def __init__(self, ui, repourl, apipath, opener, requestbuilder,
760 760 apidescriptor):
761 761 self.ui = ui
762 762
763 763 if repourl.endswith('/'):
764 764 repourl = repourl[:-1]
765 765
766 766 self._url = repourl
767 767 self._apipath = apipath
768 768 self._apiurl = '%s/%s' % (repourl, apipath)
769 769 self._opener = opener
770 770 self._requestbuilder = requestbuilder
771 771 self._descriptor = apidescriptor
772 772
773 773 # Start of ipeerconnection.
774 774
775 775 def url(self):
776 776 return self._url
777 777
778 778 def local(self):
779 779 return None
780 780
781 781 def peer(self):
782 782 return self
783 783
784 784 def canpush(self):
785 785 # TODO change once implemented.
786 786 return False
787 787
788 788 def close(self):
789 789 pass
790 790
791 791 # End of ipeerconnection.
792 792
793 793 # Start of ipeercapabilities.
794 794
795 795 def capable(self, name):
796 796 # The capabilities used internally historically map to capabilities
797 797 # advertised from the "capabilities" wire protocol command. However,
798 798 # version 2 of that command works differently.
799 799
800 800 # Maps to commands that are available.
801 801 if name in ('branchmap', 'getbundle', 'known', 'lookup', 'pushkey'):
802 802 return True
803 803
804 804 # Other concepts.
805 # TODO remove exchangev2 once we have a command implemented.
806 if name in ('bundle2', 'exchangev2'):
805 if name in ('bundle2'):
807 806 return True
808 807
809 808 # Alias command-* to presence of command of that name.
810 809 if name.startswith('command-'):
811 810 return name[len('command-'):] in self._descriptor['commands']
812 811
813 812 return False
814 813
815 814 def requirecap(self, name, purpose):
816 815 if self.capable(name):
817 816 return
818 817
819 818 raise error.CapabilityError(
820 819 _('cannot %s; client or remote repository does not support the %r '
821 820 'capability') % (purpose, name))
822 821
823 822 # End of ipeercapabilities.
824 823
825 824 def _call(self, name, **args):
826 825 with self.commandexecutor() as e:
827 826 return e.callcommand(name, args).result()
828 827
829 828 def commandexecutor(self):
830 829 return httpv2executor(self.ui, self._opener, self._requestbuilder,
831 830 self._apiurl, self._descriptor)
832 831
833 832 # Registry of API service names to metadata about peers that handle it.
834 833 #
835 834 # The following keys are meaningful:
836 835 #
837 836 # init
838 837 # Callable receiving (ui, repourl, servicepath, opener, requestbuilder,
839 838 # apidescriptor) to create a peer.
840 839 #
841 840 # priority
842 841 # Integer priority for the service. If we could choose from multiple
843 842 # services, we choose the one with the highest priority.
844 843 API_PEERS = {
845 844 wireprototypes.HTTP_WIREPROTO_V2: {
846 845 'init': httpv2peer,
847 846 'priority': 50,
848 847 },
849 848 }
850 849
851 850 def performhandshake(ui, url, opener, requestbuilder):
852 851 # The handshake is a request to the capabilities command.
853 852
854 853 caps = None
855 854 def capable(x):
856 855 raise error.ProgrammingError('should not be called')
857 856
858 857 args = {}
859 858
860 859 # The client advertises support for newer protocols by adding an
861 860 # X-HgUpgrade-* header with a list of supported APIs and an
862 861 # X-HgProto-* header advertising which serializing formats it supports.
863 862 # We only support the HTTP version 2 transport and CBOR responses for
864 863 # now.
865 864 advertisev2 = ui.configbool('experimental', 'httppeer.advertise-v2')
866 865
867 866 if advertisev2:
868 867 args['headers'] = {
869 868 r'X-HgProto-1': r'cbor',
870 869 }
871 870
872 871 args['headers'].update(
873 872 encodevalueinheaders(' '.join(sorted(API_PEERS)),
874 873 'X-HgUpgrade',
875 874 # We don't know the header limit this early.
876 875 # So make it small.
877 876 1024))
878 877
879 878 req, requrl, qs = makev1commandrequest(ui, requestbuilder, caps,
880 879 capable, url, 'capabilities',
881 880 args)
882 881 resp = sendrequest(ui, opener, req)
883 882
884 883 # The server may redirect us to the repo root, stripping the
885 884 # ?cmd=capabilities query string from the URL. The server would likely
886 885 # return HTML in this case and ``parsev1commandresponse()`` would raise.
887 886 # We catch this special case and re-issue the capabilities request against
888 887 # the new URL.
889 888 #
890 889 # We should ideally not do this, as a redirect that drops the query
891 890 # string from the URL is arguably a server bug. (Garbage in, garbage out).
892 891 # However, Mercurial clients for several years appeared to handle this
893 892 # issue without behavior degradation. And according to issue 5860, it may
894 893 # be a longstanding bug in some server implementations. So we allow a
895 894 # redirect that drops the query string to "just work."
896 895 try:
897 896 respurl, ct, resp = parsev1commandresponse(ui, url, requrl, qs, resp,
898 897 compressible=False,
899 898 allowcbor=advertisev2)
900 899 except RedirectedRepoError as e:
901 900 req, requrl, qs = makev1commandrequest(ui, requestbuilder, caps,
902 901 capable, e.respurl,
903 902 'capabilities', args)
904 903 resp = sendrequest(ui, opener, req)
905 904 respurl, ct, resp = parsev1commandresponse(ui, url, requrl, qs, resp,
906 905 compressible=False,
907 906 allowcbor=advertisev2)
908 907
909 908 try:
910 909 rawdata = resp.read()
911 910 finally:
912 911 resp.close()
913 912
914 913 if not ct.startswith('application/mercurial-'):
915 914 raise error.ProgrammingError('unexpected content-type: %s' % ct)
916 915
917 916 if advertisev2:
918 917 if ct == 'application/mercurial-cbor':
919 918 try:
920 919 info = cborutil.decodeall(rawdata)[0]
921 920 except cborutil.CBORDecodeError:
922 921 raise error.Abort(_('error decoding CBOR from remote server'),
923 922 hint=_('try again and consider contacting '
924 923 'the server operator'))
925 924
926 925 # We got a legacy response. That's fine.
927 926 elif ct in ('application/mercurial-0.1', 'application/mercurial-0.2'):
928 927 info = {
929 928 'v1capabilities': set(rawdata.split())
930 929 }
931 930
932 931 else:
933 932 raise error.RepoError(
934 933 _('unexpected response type from server: %s') % ct)
935 934 else:
936 935 info = {
937 936 'v1capabilities': set(rawdata.split())
938 937 }
939 938
940 939 return respurl, info
941 940
942 941 def makepeer(ui, path, opener=None, requestbuilder=urlreq.request):
943 942 """Construct an appropriate HTTP peer instance.
944 943
945 944 ``opener`` is an ``url.opener`` that should be used to establish
946 945 connections, perform HTTP requests.
947 946
948 947 ``requestbuilder`` is the type used for constructing HTTP requests.
949 948 It exists as an argument so extensions can override the default.
950 949 """
951 950 u = util.url(path)
952 951 if u.query or u.fragment:
953 952 raise error.Abort(_('unsupported URL component: "%s"') %
954 953 (u.query or u.fragment))
955 954
956 955 # urllib cannot handle URLs with embedded user or passwd.
957 956 url, authinfo = u.authinfo()
958 957 ui.debug('using %s\n' % url)
959 958
960 959 opener = opener or urlmod.opener(ui, authinfo)
961 960
962 961 respurl, info = performhandshake(ui, url, opener, requestbuilder)
963 962
964 963 # Given the intersection of APIs that both we and the server support,
965 964 # sort by their advertised priority and pick the first one.
966 965 #
967 966 # TODO consider making this request-based and interface driven. For
968 967 # example, the caller could say "I want a peer that does X." It's quite
969 968 # possible that not all peers would do that. Since we know the service
970 969 # capabilities, we could filter out services not meeting the
971 970 # requirements. Possibly by consulting the interfaces defined by the
972 971 # peer type.
973 972 apipeerchoices = set(info.get('apis', {}).keys()) & set(API_PEERS.keys())
974 973
975 974 preferredchoices = sorted(apipeerchoices,
976 975 key=lambda x: API_PEERS[x]['priority'],
977 976 reverse=True)
978 977
979 978 for service in preferredchoices:
980 979 apipath = '%s/%s' % (info['apibase'].rstrip('/'), service)
981 980
982 981 return API_PEERS[service]['init'](ui, respurl, apipath, opener,
983 982 requestbuilder,
984 983 info['apis'][service])
985 984
986 985 # Failed to construct an API peer. Fall back to legacy.
987 986 return httppeer(ui, path, respurl, opener, requestbuilder,
988 987 info['v1capabilities'])
989 988
990 989 def instance(ui, path, create, intents=None, createopts=None):
991 990 if create:
992 991 raise error.Abort(_('cannot create new http repository'))
993 992 try:
994 993 if path.startswith('https:') and not urlmod.has_https:
995 994 raise error.Abort(_('Python support for SSL and HTTPS '
996 995 'is not installed'))
997 996
998 997 inst = makepeer(ui, path)
999 998
1000 999 return inst
1001 1000 except error.RepoError as httpexception:
1002 1001 try:
1003 1002 r = statichttprepo.instance(ui, "static-" + path, create)
1004 1003 ui.note(_('(falling back to static-http)\n'))
1005 1004 return r
1006 1005 except error.RepoError:
1007 1006 raise httpexception # use the original http RepoError instead
@@ -1,522 +1,601
1 1 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
2 2 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 3 #
4 4 # This software may be used and distributed according to the terms of the
5 5 # GNU General Public License version 2 or any later version.
6 6
7 7 from __future__ import absolute_import
8 8
9 9 import contextlib
10 10
11 11 from .i18n import _
12 from .node import (
13 nullid,
14 )
12 15 from . import (
16 discovery,
13 17 encoding,
14 18 error,
15 19 pycompat,
16 20 streamclone,
17 21 util,
18 22 wireprotoframing,
19 23 wireprototypes,
20 24 )
21 25 from .utils import (
22 26 interfaceutil,
23 27 )
24 28
25 29 FRAMINGTYPE = b'application/mercurial-exp-framing-0005'
26 30
27 31 HTTP_WIREPROTO_V2 = wireprototypes.HTTP_WIREPROTO_V2
28 32
29 33 COMMANDS = wireprototypes.commanddict()
30 34
31 35 def handlehttpv2request(rctx, req, res, checkperm, urlparts):
32 36 from .hgweb import common as hgwebcommon
33 37
34 38 # URL space looks like: <permissions>/<command>, where <permission> can
35 39 # be ``ro`` or ``rw`` to signal read-only or read-write, respectively.
36 40
37 41 # Root URL does nothing meaningful... yet.
38 42 if not urlparts:
39 43 res.status = b'200 OK'
40 44 res.headers[b'Content-Type'] = b'text/plain'
41 45 res.setbodybytes(_('HTTP version 2 API handler'))
42 46 return
43 47
44 48 if len(urlparts) == 1:
45 49 res.status = b'404 Not Found'
46 50 res.headers[b'Content-Type'] = b'text/plain'
47 51 res.setbodybytes(_('do not know how to process %s\n') %
48 52 req.dispatchpath)
49 53 return
50 54
51 55 permission, command = urlparts[0:2]
52 56
53 57 if permission not in (b'ro', b'rw'):
54 58 res.status = b'404 Not Found'
55 59 res.headers[b'Content-Type'] = b'text/plain'
56 60 res.setbodybytes(_('unknown permission: %s') % permission)
57 61 return
58 62
59 63 if req.method != 'POST':
60 64 res.status = b'405 Method Not Allowed'
61 65 res.headers[b'Allow'] = b'POST'
62 66 res.setbodybytes(_('commands require POST requests'))
63 67 return
64 68
65 69 # At some point we'll want to use our own API instead of recycling the
66 70 # behavior of version 1 of the wire protocol...
67 71 # TODO return reasonable responses - not responses that overload the
68 72 # HTTP status line message for error reporting.
69 73 try:
70 74 checkperm(rctx, req, 'pull' if permission == b'ro' else 'push')
71 75 except hgwebcommon.ErrorResponse as e:
72 76 res.status = hgwebcommon.statusmessage(e.code, pycompat.bytestr(e))
73 77 for k, v in e.headers:
74 78 res.headers[k] = v
75 79 res.setbodybytes('permission denied')
76 80 return
77 81
78 82 # We have a special endpoint to reflect the request back at the client.
79 83 if command == b'debugreflect':
80 84 _processhttpv2reflectrequest(rctx.repo.ui, rctx.repo, req, res)
81 85 return
82 86
83 87 # Extra commands that we handle that aren't really wire protocol
84 88 # commands. Think extra hard before making this hackery available to
85 89 # extension.
86 90 extracommands = {'multirequest'}
87 91
88 92 if command not in COMMANDS and command not in extracommands:
89 93 res.status = b'404 Not Found'
90 94 res.headers[b'Content-Type'] = b'text/plain'
91 95 res.setbodybytes(_('unknown wire protocol command: %s\n') % command)
92 96 return
93 97
94 98 repo = rctx.repo
95 99 ui = repo.ui
96 100
97 101 proto = httpv2protocolhandler(req, ui)
98 102
99 103 if (not COMMANDS.commandavailable(command, proto)
100 104 and command not in extracommands):
101 105 res.status = b'404 Not Found'
102 106 res.headers[b'Content-Type'] = b'text/plain'
103 107 res.setbodybytes(_('invalid wire protocol command: %s') % command)
104 108 return
105 109
106 110 # TODO consider cases where proxies may add additional Accept headers.
107 111 if req.headers.get(b'Accept') != FRAMINGTYPE:
108 112 res.status = b'406 Not Acceptable'
109 113 res.headers[b'Content-Type'] = b'text/plain'
110 114 res.setbodybytes(_('client MUST specify Accept header with value: %s\n')
111 115 % FRAMINGTYPE)
112 116 return
113 117
114 118 if req.headers.get(b'Content-Type') != FRAMINGTYPE:
115 119 res.status = b'415 Unsupported Media Type'
116 120 # TODO we should send a response with appropriate media type,
117 121 # since client does Accept it.
118 122 res.headers[b'Content-Type'] = b'text/plain'
119 123 res.setbodybytes(_('client MUST send Content-Type header with '
120 124 'value: %s\n') % FRAMINGTYPE)
121 125 return
122 126
123 127 _processhttpv2request(ui, repo, req, res, permission, command, proto)
124 128
125 129 def _processhttpv2reflectrequest(ui, repo, req, res):
126 130 """Reads unified frame protocol request and dumps out state to client.
127 131
128 132 This special endpoint can be used to help debug the wire protocol.
129 133
130 134 Instead of routing the request through the normal dispatch mechanism,
131 135 we instead read all frames, decode them, and feed them into our state
132 136 tracker. We then dump the log of all that activity back out to the
133 137 client.
134 138 """
135 139 import json
136 140
137 141 # Reflection APIs have a history of being abused, accidentally disclosing
138 142 # sensitive data, etc. So we have a config knob.
139 143 if not ui.configbool('experimental', 'web.api.debugreflect'):
140 144 res.status = b'404 Not Found'
141 145 res.headers[b'Content-Type'] = b'text/plain'
142 146 res.setbodybytes(_('debugreflect service not available'))
143 147 return
144 148
145 149 # We assume we have a unified framing protocol request body.
146 150
147 151 reactor = wireprotoframing.serverreactor()
148 152 states = []
149 153
150 154 while True:
151 155 frame = wireprotoframing.readframe(req.bodyfh)
152 156
153 157 if not frame:
154 158 states.append(b'received: <no frame>')
155 159 break
156 160
157 161 states.append(b'received: %d %d %d %s' % (frame.typeid, frame.flags,
158 162 frame.requestid,
159 163 frame.payload))
160 164
161 165 action, meta = reactor.onframerecv(frame)
162 166 states.append(json.dumps((action, meta), sort_keys=True,
163 167 separators=(', ', ': ')))
164 168
165 169 action, meta = reactor.oninputeof()
166 170 meta['action'] = action
167 171 states.append(json.dumps(meta, sort_keys=True, separators=(', ',': ')))
168 172
169 173 res.status = b'200 OK'
170 174 res.headers[b'Content-Type'] = b'text/plain'
171 175 res.setbodybytes(b'\n'.join(states))
172 176
173 177 def _processhttpv2request(ui, repo, req, res, authedperm, reqcommand, proto):
174 178 """Post-validation handler for HTTPv2 requests.
175 179
176 180 Called when the HTTP request contains unified frame-based protocol
177 181 frames for evaluation.
178 182 """
179 183 # TODO Some HTTP clients are full duplex and can receive data before
180 184 # the entire request is transmitted. Figure out a way to indicate support
181 185 # for that so we can opt into full duplex mode.
182 186 reactor = wireprotoframing.serverreactor(deferoutput=True)
183 187 seencommand = False
184 188
185 189 outstream = reactor.makeoutputstream()
186 190
187 191 while True:
188 192 frame = wireprotoframing.readframe(req.bodyfh)
189 193 if not frame:
190 194 break
191 195
192 196 action, meta = reactor.onframerecv(frame)
193 197
194 198 if action == 'wantframe':
195 199 # Need more data before we can do anything.
196 200 continue
197 201 elif action == 'runcommand':
198 202 sentoutput = _httpv2runcommand(ui, repo, req, res, authedperm,
199 203 reqcommand, reactor, outstream,
200 204 meta, issubsequent=seencommand)
201 205
202 206 if sentoutput:
203 207 return
204 208
205 209 seencommand = True
206 210
207 211 elif action == 'error':
208 212 # TODO define proper error mechanism.
209 213 res.status = b'200 OK'
210 214 res.headers[b'Content-Type'] = b'text/plain'
211 215 res.setbodybytes(meta['message'] + b'\n')
212 216 return
213 217 else:
214 218 raise error.ProgrammingError(
215 219 'unhandled action from frame processor: %s' % action)
216 220
217 221 action, meta = reactor.oninputeof()
218 222 if action == 'sendframes':
219 223 # We assume we haven't started sending the response yet. If we're
220 224 # wrong, the response type will raise an exception.
221 225 res.status = b'200 OK'
222 226 res.headers[b'Content-Type'] = FRAMINGTYPE
223 227 res.setbodygen(meta['framegen'])
224 228 elif action == 'noop':
225 229 pass
226 230 else:
227 231 raise error.ProgrammingError('unhandled action from frame processor: %s'
228 232 % action)
229 233
230 234 def _httpv2runcommand(ui, repo, req, res, authedperm, reqcommand, reactor,
231 235 outstream, command, issubsequent):
232 236 """Dispatch a wire protocol command made from HTTPv2 requests.
233 237
234 238 The authenticated permission (``authedperm``) along with the original
235 239 command from the URL (``reqcommand``) are passed in.
236 240 """
237 241 # We already validated that the session has permissions to perform the
238 242 # actions in ``authedperm``. In the unified frame protocol, the canonical
239 243 # command to run is expressed in a frame. However, the URL also requested
240 244 # to run a specific command. We need to be careful that the command we
241 245 # run doesn't have permissions requirements greater than what was granted
242 246 # by ``authedperm``.
243 247 #
244 248 # Our rule for this is we only allow one command per HTTP request and
245 249 # that command must match the command in the URL. However, we make
246 250 # an exception for the ``multirequest`` URL. This URL is allowed to
247 251 # execute multiple commands. We double check permissions of each command
248 252 # as it is invoked to ensure there is no privilege escalation.
249 253 # TODO consider allowing multiple commands to regular command URLs
250 254 # iff each command is the same.
251 255
252 256 proto = httpv2protocolhandler(req, ui, args=command['args'])
253 257
254 258 if reqcommand == b'multirequest':
255 259 if not COMMANDS.commandavailable(command['command'], proto):
256 260 # TODO proper error mechanism
257 261 res.status = b'200 OK'
258 262 res.headers[b'Content-Type'] = b'text/plain'
259 263 res.setbodybytes(_('wire protocol command not available: %s') %
260 264 command['command'])
261 265 return True
262 266
263 267 # TODO don't use assert here, since it may be elided by -O.
264 268 assert authedperm in (b'ro', b'rw')
265 269 wirecommand = COMMANDS[command['command']]
266 270 assert wirecommand.permission in ('push', 'pull')
267 271
268 272 if authedperm == b'ro' and wirecommand.permission != 'pull':
269 273 # TODO proper error mechanism
270 274 res.status = b'403 Forbidden'
271 275 res.headers[b'Content-Type'] = b'text/plain'
272 276 res.setbodybytes(_('insufficient permissions to execute '
273 277 'command: %s') % command['command'])
274 278 return True
275 279
276 280 # TODO should we also call checkperm() here? Maybe not if we're going
277 281 # to overhaul that API. The granted scope from the URL check should
278 282 # be good enough.
279 283
280 284 else:
281 285 # Don't allow multiple commands outside of ``multirequest`` URL.
282 286 if issubsequent:
283 287 # TODO proper error mechanism
284 288 res.status = b'200 OK'
285 289 res.headers[b'Content-Type'] = b'text/plain'
286 290 res.setbodybytes(_('multiple commands cannot be issued to this '
287 291 'URL'))
288 292 return True
289 293
290 294 if reqcommand != command['command']:
291 295 # TODO define proper error mechanism
292 296 res.status = b'200 OK'
293 297 res.headers[b'Content-Type'] = b'text/plain'
294 298 res.setbodybytes(_('command in frame must match command in URL'))
295 299 return True
296 300
297 301 res.status = b'200 OK'
298 302 res.headers[b'Content-Type'] = FRAMINGTYPE
299 303
300 304 try:
301 305 objs = dispatch(repo, proto, command['command'])
302 306
303 307 action, meta = reactor.oncommandresponsereadyobjects(
304 308 outstream, command['requestid'], objs)
305 309
306 310 except Exception as e:
307 311 action, meta = reactor.onservererror(
308 312 outstream, command['requestid'],
309 313 _('exception when invoking command: %s') % e)
310 314
311 315 if action == 'sendframes':
312 316 res.setbodygen(meta['framegen'])
313 317 return True
314 318 elif action == 'noop':
315 319 return False
316 320 else:
317 321 raise error.ProgrammingError('unhandled event from reactor: %s' %
318 322 action)
319 323
320 324 def getdispatchrepo(repo, proto, command):
321 325 return repo.filtered('served')
322 326
323 327 def dispatch(repo, proto, command):
324 328 repo = getdispatchrepo(repo, proto, command)
325 329
326 330 func, spec = COMMANDS[command]
327 331 args = proto.getargs(spec)
328 332
329 333 return func(repo, proto, **args)
330 334
331 335 @interfaceutil.implementer(wireprototypes.baseprotocolhandler)
332 336 class httpv2protocolhandler(object):
333 337 def __init__(self, req, ui, args=None):
334 338 self._req = req
335 339 self._ui = ui
336 340 self._args = args
337 341
338 342 @property
339 343 def name(self):
340 344 return HTTP_WIREPROTO_V2
341 345
342 346 def getargs(self, args):
343 347 data = {}
344 348 for k, typ in args.items():
345 349 if k == '*':
346 350 raise NotImplementedError('do not support * args')
347 351 elif k in self._args:
348 352 # TODO consider validating value types.
349 353 data[k] = self._args[k]
350 354
351 355 return data
352 356
353 357 def getprotocaps(self):
354 358 # Protocol capabilities are currently not implemented for HTTP V2.
355 359 return set()
356 360
357 361 def getpayload(self):
358 362 raise NotImplementedError
359 363
360 364 @contextlib.contextmanager
361 365 def mayberedirectstdio(self):
362 366 raise NotImplementedError
363 367
364 368 def client(self):
365 369 raise NotImplementedError
366 370
367 371 def addcapabilities(self, repo, caps):
368 372 return caps
369 373
370 374 def checkperm(self, perm):
371 375 raise NotImplementedError
372 376
373 377 def httpv2apidescriptor(req, repo):
374 378 proto = httpv2protocolhandler(req, repo.ui)
375 379
376 380 return _capabilitiesv2(repo, proto)
377 381
378 382 def _capabilitiesv2(repo, proto):
379 383 """Obtain the set of capabilities for version 2 transports.
380 384
381 385 These capabilities are distinct from the capabilities for version 1
382 386 transports.
383 387 """
384 388 compression = []
385 389 for engine in wireprototypes.supportedcompengines(repo.ui, util.SERVERROLE):
386 390 compression.append({
387 391 b'name': engine.wireprotosupport().name,
388 392 })
389 393
390 394 caps = {
391 395 'commands': {},
392 396 'compression': compression,
393 397 'framingmediatypes': [FRAMINGTYPE],
394 398 }
395 399
396 400 for command, entry in COMMANDS.items():
397 401 caps['commands'][command] = {
398 402 'args': entry.args,
399 403 'permissions': [entry.permission],
400 404 }
401 405
402 406 if streamclone.allowservergeneration(repo):
403 407 caps['rawrepoformats'] = sorted(repo.requirements &
404 408 repo.supportedformats)
405 409
406 410 return proto.addcapabilities(repo, caps)
407 411
408 412 def wireprotocommand(name, args=None, permission='push'):
409 413 """Decorator to declare a wire protocol command.
410 414
411 415 ``name`` is the name of the wire protocol command being provided.
412 416
413 417 ``args`` is a dict of argument names to example values.
414 418
415 419 ``permission`` defines the permission type needed to run this command.
416 420 Can be ``push`` or ``pull``. These roughly map to read-write and read-only,
417 421 respectively. Default is to assume command requires ``push`` permissions
418 422 because otherwise commands not declaring their permissions could modify
419 423 a repository that is supposed to be read-only.
420 424
421 425 Wire protocol commands are generators of objects to be serialized and
422 426 sent to the client.
423 427
424 428 If a command raises an uncaught exception, this will be translated into
425 429 a command error.
426 430 """
427 431 transports = {k for k, v in wireprototypes.TRANSPORTS.items()
428 432 if v['version'] == 2}
429 433
430 434 if permission not in ('push', 'pull'):
431 435 raise error.ProgrammingError('invalid wire protocol permission; '
432 436 'got %s; expected "push" or "pull"' %
433 437 permission)
434 438
435 439 if args is None:
436 440 args = {}
437 441
438 442 if not isinstance(args, dict):
439 443 raise error.ProgrammingError('arguments for version 2 commands '
440 444 'must be declared as dicts')
441 445
442 446 def register(func):
443 447 if name in COMMANDS:
444 448 raise error.ProgrammingError('%s command already registered '
445 449 'for version 2' % name)
446 450
447 451 COMMANDS[name] = wireprototypes.commandentry(
448 452 func, args=args, transports=transports, permission=permission)
449 453
450 454 return func
451 455
452 456 return register
453 457
454 458 @wireprotocommand('branchmap', permission='pull')
455 459 def branchmapv2(repo, proto):
456 460 yield {encoding.fromlocal(k): v
457 461 for k, v in repo.branchmap().iteritems()}
458 462
459 463 @wireprotocommand('capabilities', permission='pull')
460 464 def capabilitiesv2(repo, proto):
461 465 yield _capabilitiesv2(repo, proto)
462 466
467 @wireprotocommand('changesetdata',
468 args={
469 'noderange': [[b'0123456...'], [b'abcdef...']],
470 'nodes': [b'0123456...'],
471 'fields': {b'parents', b'revision'},
472 },
473 permission='pull')
474 def changesetdata(repo, proto, noderange=None, nodes=None, fields=None):
475 fields = fields or set()
476
477 if noderange is None and nodes is None:
478 raise error.WireprotoCommandError(
479 'noderange or nodes must be defined')
480
481 if noderange is not None:
482 if len(noderange) != 2:
483 raise error.WireprotoCommandError(
484 'noderange must consist of 2 elements')
485
486 if not noderange[1]:
487 raise error.WireprotoCommandError(
488 'heads in noderange request cannot be empty')
489
490 cl = repo.changelog
491 hasnode = cl.hasnode
492
493 seen = set()
494 outgoing = []
495
496 if nodes is not None:
497 outgoing.extend(n for n in nodes if hasnode(n))
498 seen |= set(outgoing)
499
500 if noderange is not None:
501 if noderange[0]:
502 common = [n for n in noderange[0] if hasnode(n)]
503 else:
504 common = [nullid]
505
506 for n in discovery.outgoing(repo, common, noderange[1]).missing:
507 if n not in seen:
508 outgoing.append(n)
509 # Don't need to add to seen here because this is the final
510 # source of nodes and there should be no duplicates in this
511 # list.
512
513 seen.clear()
514
515 if outgoing:
516 repo.hook('preoutgoing', throw=True, source='serve')
517
518 yield {
519 b'totalitems': len(outgoing),
520 }
521
522 # It is already topologically sorted by revision number.
523 for node in outgoing:
524 d = {
525 b'node': node,
526 }
527
528 if b'parents' in fields:
529 d[b'parents'] = cl.parents(node)
530
531 revisiondata = None
532
533 if b'revision' in fields:
534 revisiondata = cl.revision(node, raw=True)
535 d[b'revisionsize'] = len(revisiondata)
536
537 yield d
538
539 if revisiondata is not None:
540 yield revisiondata
541
463 542 @wireprotocommand('heads',
464 543 args={
465 544 'publiconly': False,
466 545 },
467 546 permission='pull')
468 547 def headsv2(repo, proto, publiconly=False):
469 548 if publiconly:
470 549 repo = repo.filtered('immutable')
471 550
472 551 yield repo.heads()
473 552
474 553 @wireprotocommand('known',
475 554 args={
476 555 'nodes': [b'deadbeef'],
477 556 },
478 557 permission='pull')
479 558 def knownv2(repo, proto, nodes=None):
480 559 nodes = nodes or []
481 560 result = b''.join(b'1' if n else b'0' for n in repo.known(nodes))
482 561 yield result
483 562
484 563 @wireprotocommand('listkeys',
485 564 args={
486 565 'namespace': b'ns',
487 566 },
488 567 permission='pull')
489 568 def listkeysv2(repo, proto, namespace=None):
490 569 keys = repo.listkeys(encoding.tolocal(namespace))
491 570 keys = {encoding.fromlocal(k): encoding.fromlocal(v)
492 571 for k, v in keys.iteritems()}
493 572
494 573 yield keys
495 574
496 575 @wireprotocommand('lookup',
497 576 args={
498 577 'key': b'foo',
499 578 },
500 579 permission='pull')
501 580 def lookupv2(repo, proto, key):
502 581 key = encoding.tolocal(key)
503 582
504 583 # TODO handle exception.
505 584 node = repo.lookup(key)
506 585
507 586 yield node
508 587
509 588 @wireprotocommand('pushkey',
510 589 args={
511 590 'namespace': b'ns',
512 591 'key': b'key',
513 592 'old': b'old',
514 593 'new': b'new',
515 594 },
516 595 permission='push')
517 596 def pushkeyv2(repo, proto, namespace, key, old, new):
518 597 # TODO handle ui output redirection
519 598 yield repo.pushkey(encoding.tolocal(namespace),
520 599 encoding.tolocal(key),
521 600 encoding.tolocal(old),
522 601 encoding.tolocal(new))
@@ -1,749 +1,749
1 1 #require no-chg
2 2
3 3 $ . $TESTDIR/wireprotohelpers.sh
4 4
5 5 $ cat >> $HGRCPATH << EOF
6 6 > [web]
7 7 > push_ssl = false
8 8 > allow_push = *
9 9 > EOF
10 10
11 11 $ hg init server
12 12 $ cd server
13 13 $ touch a
14 14 $ hg -q commit -A -m initial
15 15 $ cd ..
16 16
17 17 $ hg serve -R server -p $HGPORT -d --pid-file hg.pid
18 18 $ cat hg.pid >> $DAEMON_PIDS
19 19
20 20 compression formats are advertised in compression capability
21 21
22 22 #if zstd
23 23 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=capabilities' | tr ' ' '\n' | grep '^compression=zstd,zlib$' > /dev/null
24 24 #else
25 25 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=capabilities' | tr ' ' '\n' | grep '^compression=zlib$' > /dev/null
26 26 #endif
27 27
28 28 $ killdaemons.py
29 29
30 30 server.compressionengines can replace engines list wholesale
31 31
32 32 $ hg serve --config server.compressionengines=none -R server -p $HGPORT -d --pid-file hg.pid
33 33 $ cat hg.pid > $DAEMON_PIDS
34 34 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=capabilities' | tr ' ' '\n' | grep '^compression=none$' > /dev/null
35 35
36 36 $ killdaemons.py
37 37
38 38 Order of engines can also change
39 39
40 40 $ hg serve --config server.compressionengines=none,zlib -R server -p $HGPORT -d --pid-file hg.pid
41 41 $ cat hg.pid > $DAEMON_PIDS
42 42 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=capabilities' | tr ' ' '\n' | grep '^compression=none,zlib$' > /dev/null
43 43
44 44 $ killdaemons.py
45 45
46 46 Start a default server again
47 47
48 48 $ hg serve -R server -p $HGPORT -d --pid-file hg.pid
49 49 $ cat hg.pid > $DAEMON_PIDS
50 50
51 51 Server should send application/mercurial-0.1 to clients if no Accept is used
52 52
53 53 $ get-with-headers.py --headeronly $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
54 54 200 Script output follows
55 55 content-type: application/mercurial-0.1
56 56 date: $HTTP_DATE$
57 57 server: testing stub value
58 58 transfer-encoding: chunked
59 59
60 60 Server should send application/mercurial-0.1 when client says it wants it
61 61
62 62 $ get-with-headers.py --hgproto '0.1' --headeronly $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
63 63 200 Script output follows
64 64 content-type: application/mercurial-0.1
65 65 date: $HTTP_DATE$
66 66 server: testing stub value
67 67 transfer-encoding: chunked
68 68
69 69 Server should send application/mercurial-0.2 when client says it wants it
70 70
71 71 $ get-with-headers.py --hgproto '0.2' --headeronly $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
72 72 200 Script output follows
73 73 content-type: application/mercurial-0.2
74 74 date: $HTTP_DATE$
75 75 server: testing stub value
76 76 transfer-encoding: chunked
77 77
78 78 $ get-with-headers.py --hgproto '0.1 0.2' --headeronly $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
79 79 200 Script output follows
80 80 content-type: application/mercurial-0.2
81 81 date: $HTTP_DATE$
82 82 server: testing stub value
83 83 transfer-encoding: chunked
84 84
85 85 Requesting a compression format that server doesn't support results will fall back to 0.1
86 86
87 87 $ get-with-headers.py --hgproto '0.2 comp=aa' --headeronly $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
88 88 200 Script output follows
89 89 content-type: application/mercurial-0.1
90 90 date: $HTTP_DATE$
91 91 server: testing stub value
92 92 transfer-encoding: chunked
93 93
94 94 #if zstd
95 95 zstd is used if available
96 96
97 97 $ get-with-headers.py --hgproto '0.2 comp=zstd' $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' > resp
98 98 $ f --size --hexdump --bytes 36 --sha1 resp
99 99 resp: size=248, sha1=4d8d8f87fb82bd542ce52881fdc94f850748
100 100 0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
101 101 0010: 74 20 66 6f 6c 6c 6f 77 73 0a 0a 04 7a 73 74 64 |t follows...zstd|
102 102 0020: 28 b5 2f fd |(./.|
103 103
104 104 #endif
105 105
106 106 application/mercurial-0.2 is not yet used on non-streaming responses
107 107
108 108 $ get-with-headers.py --hgproto '0.2' $LOCALIP:$HGPORT '?cmd=heads' -
109 109 200 Script output follows
110 110 content-length: 41
111 111 content-type: application/mercurial-0.1
112 112 date: $HTTP_DATE$
113 113 server: testing stub value
114 114
115 115 e93700bd72895c5addab234c56d4024b487a362f
116 116
117 117 Now test protocol preference usage
118 118
119 119 $ killdaemons.py
120 120 $ hg serve --config server.compressionengines=none,zlib -R server -p $HGPORT -d --pid-file hg.pid
121 121 $ cat hg.pid > $DAEMON_PIDS
122 122
123 123 No Accept will send 0.1+zlib, even though "none" is preferred b/c "none" isn't supported on 0.1
124 124
125 125 $ get-with-headers.py --headeronly $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' Content-Type
126 126 200 Script output follows
127 127 content-type: application/mercurial-0.1
128 128
129 129 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' > resp
130 130 $ f --size --hexdump --bytes 28 --sha1 resp
131 131 resp: size=227, sha1=35a4c074da74f32f5440da3cbf04
132 132 0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
133 133 0010: 74 20 66 6f 6c 6c 6f 77 73 0a 0a 78 |t follows..x|
134 134
135 135 Explicit 0.1 will send zlib because "none" isn't supported on 0.1
136 136
137 137 $ get-with-headers.py --hgproto '0.1' $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' > resp
138 138 $ f --size --hexdump --bytes 28 --sha1 resp
139 139 resp: size=227, sha1=35a4c074da74f32f5440da3cbf04
140 140 0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
141 141 0010: 74 20 66 6f 6c 6c 6f 77 73 0a 0a 78 |t follows..x|
142 142
143 143 0.2 with no compression will get "none" because that is server's preference
144 144 (spec says ZL and UN are implicitly supported)
145 145
146 146 $ get-with-headers.py --hgproto '0.2' $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' > resp
147 147 $ f --size --hexdump --bytes 32 --sha1 resp
148 148 resp: size=432, sha1=ac931b412ec185a02e0e5bcff98dac83
149 149 0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
150 150 0010: 74 20 66 6f 6c 6c 6f 77 73 0a 0a 04 6e 6f 6e 65 |t follows...none|
151 151
152 152 Client receives server preference even if local order doesn't match
153 153
154 154 $ get-with-headers.py --hgproto '0.2 comp=zlib,none' $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' > resp
155 155 $ f --size --hexdump --bytes 32 --sha1 resp
156 156 resp: size=432, sha1=ac931b412ec185a02e0e5bcff98dac83
157 157 0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
158 158 0010: 74 20 66 6f 6c 6c 6f 77 73 0a 0a 04 6e 6f 6e 65 |t follows...none|
159 159
160 160 Client receives only supported format even if not server preferred format
161 161
162 162 $ get-with-headers.py --hgproto '0.2 comp=zlib' $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' > resp
163 163 $ f --size --hexdump --bytes 33 --sha1 resp
164 164 resp: size=232, sha1=a1c727f0c9693ca15742a75c30419bc36
165 165 0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
166 166 0010: 74 20 66 6f 6c 6c 6f 77 73 0a 0a 04 7a 6c 69 62 |t follows...zlib|
167 167 0020: 78 |x|
168 168
169 169 $ killdaemons.py
170 170 $ cd ..
171 171
172 172 Test listkeys for listing namespaces
173 173
174 174 $ hg init empty
175 175 $ hg -R empty serve -p $HGPORT -d --pid-file hg.pid
176 176 $ cat hg.pid > $DAEMON_PIDS
177 177
178 178 $ hg --verbose debugwireproto http://$LOCALIP:$HGPORT << EOF
179 179 > command listkeys
180 180 > namespace namespaces
181 181 > EOF
182 182 s> GET /?cmd=capabilities HTTP/1.1\r\n
183 183 s> Accept-Encoding: identity\r\n
184 184 s> accept: application/mercurial-0.1\r\n
185 185 s> host: $LOCALIP:$HGPORT\r\n (glob)
186 186 s> user-agent: Mercurial debugwireproto\r\n
187 187 s> \r\n
188 188 s> makefile('rb', None)
189 189 s> HTTP/1.1 200 Script output follows\r\n
190 190 s> Server: testing stub value\r\n
191 191 s> Date: $HTTP_DATE$\r\n
192 192 s> Content-Type: application/mercurial-0.1\r\n
193 193 s> Content-Length: *\r\n (glob)
194 194 s> \r\n
195 195 s> batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
196 196 sending listkeys command
197 197 s> GET /?cmd=listkeys HTTP/1.1\r\n
198 198 s> Accept-Encoding: identity\r\n
199 199 s> vary: X-HgArg-1,X-HgProto-1\r\n
200 200 s> x-hgarg-1: namespace=namespaces\r\n
201 201 s> x-hgproto-1: 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull\r\n
202 202 s> accept: application/mercurial-0.1\r\n
203 203 s> host: $LOCALIP:$HGPORT\r\n (glob)
204 204 s> user-agent: Mercurial debugwireproto\r\n
205 205 s> \r\n
206 206 s> makefile('rb', None)
207 207 s> HTTP/1.1 200 Script output follows\r\n
208 208 s> Server: testing stub value\r\n
209 209 s> Date: $HTTP_DATE$\r\n
210 210 s> Content-Type: application/mercurial-0.1\r\n
211 211 s> Content-Length: 30\r\n
212 212 s> \r\n
213 213 s> bookmarks\t\n
214 214 s> namespaces\t\n
215 215 s> phases\t
216 216 response: {
217 217 b'bookmarks': b'',
218 218 b'namespaces': b'',
219 219 b'phases': b''
220 220 }
221 221
222 222 Same thing, but with "httprequest" command
223 223
224 224 $ hg --verbose debugwireproto --peer raw http://$LOCALIP:$HGPORT << EOF
225 225 > httprequest GET ?cmd=listkeys
226 226 > user-agent: test
227 227 > x-hgarg-1: namespace=namespaces
228 228 > EOF
229 229 using raw connection to peer
230 230 s> GET /?cmd=listkeys HTTP/1.1\r\n
231 231 s> Accept-Encoding: identity\r\n
232 232 s> user-agent: test\r\n
233 233 s> x-hgarg-1: namespace=namespaces\r\n
234 234 s> host: $LOCALIP:$HGPORT\r\n (glob)
235 235 s> \r\n
236 236 s> makefile('rb', None)
237 237 s> HTTP/1.1 200 Script output follows\r\n
238 238 s> Server: testing stub value\r\n
239 239 s> Date: $HTTP_DATE$\r\n
240 240 s> Content-Type: application/mercurial-0.1\r\n
241 241 s> Content-Length: 30\r\n
242 242 s> \r\n
243 243 s> bookmarks\t\n
244 244 s> namespaces\t\n
245 245 s> phases\t
246 246
247 247 Client with HTTPv2 enabled advertises that and gets old capabilities response from old server
248 248
249 249 $ hg --config experimental.httppeer.advertise-v2=true --verbose debugwireproto http://$LOCALIP:$HGPORT << EOF
250 250 > command heads
251 251 > EOF
252 252 s> GET /?cmd=capabilities HTTP/1.1\r\n
253 253 s> Accept-Encoding: identity\r\n
254 254 s> vary: X-HgProto-1,X-HgUpgrade-1\r\n
255 255 s> x-hgproto-1: cbor\r\n
256 256 s> x-hgupgrade-1: exp-http-v2-0001\r\n
257 257 s> accept: application/mercurial-0.1\r\n
258 258 s> host: $LOCALIP:$HGPORT\r\n (glob)
259 259 s> user-agent: Mercurial debugwireproto\r\n
260 260 s> \r\n
261 261 s> makefile('rb', None)
262 262 s> HTTP/1.1 200 Script output follows\r\n
263 263 s> Server: testing stub value\r\n
264 264 s> Date: $HTTP_DATE$\r\n
265 265 s> Content-Type: application/mercurial-0.1\r\n
266 266 s> Content-Length: *\r\n (glob)
267 267 s> \r\n
268 268 s> batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
269 269 sending heads command
270 270 s> GET /?cmd=heads HTTP/1.1\r\n
271 271 s> Accept-Encoding: identity\r\n
272 272 s> vary: X-HgProto-1\r\n
273 273 s> x-hgproto-1: 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull\r\n
274 274 s> accept: application/mercurial-0.1\r\n
275 275 s> host: $LOCALIP:$HGPORT\r\n (glob)
276 276 s> user-agent: Mercurial debugwireproto\r\n
277 277 s> \r\n
278 278 s> makefile('rb', None)
279 279 s> HTTP/1.1 200 Script output follows\r\n
280 280 s> Server: testing stub value\r\n
281 281 s> Date: $HTTP_DATE$\r\n
282 282 s> Content-Type: application/mercurial-0.1\r\n
283 283 s> Content-Length: 41\r\n
284 284 s> \r\n
285 285 s> 0000000000000000000000000000000000000000\n
286 286 response: [
287 287 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
288 288 ]
289 289
290 290 $ killdaemons.py
291 291 $ enablehttpv2 empty
292 292 $ hg --config server.compressionengines=zlib -R empty serve -p $HGPORT -d --pid-file hg.pid
293 293 $ cat hg.pid > $DAEMON_PIDS
294 294
295 295 Client with HTTPv2 enabled automatically upgrades if the server supports it
296 296
297 297 $ hg --config experimental.httppeer.advertise-v2=true --verbose debugwireproto http://$LOCALIP:$HGPORT << EOF
298 298 > command heads
299 299 > EOF
300 300 s> GET /?cmd=capabilities HTTP/1.1\r\n
301 301 s> Accept-Encoding: identity\r\n
302 302 s> vary: X-HgProto-1,X-HgUpgrade-1\r\n
303 303 s> x-hgproto-1: cbor\r\n
304 304 s> x-hgupgrade-1: exp-http-v2-0001\r\n
305 305 s> accept: application/mercurial-0.1\r\n
306 306 s> host: $LOCALIP:$HGPORT\r\n (glob)
307 307 s> user-agent: Mercurial debugwireproto\r\n
308 308 s> \r\n
309 309 s> makefile('rb', None)
310 310 s> HTTP/1.1 200 OK\r\n
311 311 s> Server: testing stub value\r\n
312 312 s> Date: $HTTP_DATE$\r\n
313 313 s> Content-Type: application/mercurial-cbor\r\n
314 314 s> Content-Length: *\r\n (glob)
315 315 s> \r\n
316 s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0001\xa4Hcommands\xa7Ibranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullEheads\xa2Dargs\xa1Jpubliconly\xf4Kpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\x81HdeadbeefKpermissions\x81DpullHlistkeys\xa2Dargs\xa1InamespaceBnsKpermissions\x81DpullFlookup\xa2Dargs\xa1CkeyCfooKpermissions\x81DpullGpushkey\xa2Dargs\xa4CkeyCkeyInamespaceBnsCnewCnewColdColdKpermissions\x81DpushKcompression\x81\xa1DnameDzlibQframingmediatypes\x81X&application/mercurial-exp-framing-0005Nrawrepoformats\x82LgeneraldeltaHrevlogv1Nv1capabilitiesY\x01\xc5batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
316 s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0001\xa4Hcommands\xa8Ibranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa3Ffields\xd9\x01\x02\x82GparentsHrevisionInoderange\x82\x81J0123456...\x81Iabcdef...Enodes\x81J0123456...Kpermissions\x81DpullEheads\xa2Dargs\xa1Jpubliconly\xf4Kpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\x81HdeadbeefKpermissions\x81DpullHlistkeys\xa2Dargs\xa1InamespaceBnsKpermissions\x81DpullFlookup\xa2Dargs\xa1CkeyCfooKpermissions\x81DpullGpushkey\xa2Dargs\xa4CkeyCkeyInamespaceBnsCnewCnewColdColdKpermissions\x81DpushKcompression\x81\xa1DnameDzlibQframingmediatypes\x81X&application/mercurial-exp-framing-0005Nrawrepoformats\x82LgeneraldeltaHrevlogv1Nv1capabilitiesY\x01\xc5batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
317 317 sending heads command
318 318 s> POST /api/exp-http-v2-0001/ro/heads HTTP/1.1\r\n
319 319 s> Accept-Encoding: identity\r\n
320 320 s> accept: application/mercurial-exp-framing-0005\r\n
321 321 s> content-type: application/mercurial-exp-framing-0005\r\n
322 322 s> content-length: 20\r\n
323 323 s> host: $LOCALIP:$HGPORT\r\n (glob)
324 324 s> user-agent: Mercurial debugwireproto\r\n
325 325 s> \r\n
326 326 s> \x0c\x00\x00\x01\x00\x01\x01\x11\xa1DnameEheads
327 327 s> makefile('rb', None)
328 328 s> HTTP/1.1 200 OK\r\n
329 329 s> Server: testing stub value\r\n
330 330 s> Date: $HTTP_DATE$\r\n
331 331 s> Content-Type: application/mercurial-exp-framing-0005\r\n
332 332 s> Transfer-Encoding: chunked\r\n
333 333 s> \r\n
334 334 s> 13\r\n
335 335 s> \x0b\x00\x00\x01\x00\x02\x011
336 336 s> \xa1FstatusBok
337 337 s> \r\n
338 338 received frame(size=11; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=continuation)
339 339 s> 1e\r\n
340 340 s> \x16\x00\x00\x01\x00\x02\x001
341 341 s> \x81T\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00
342 342 s> \r\n
343 343 received frame(size=22; request=1; stream=2; streamflags=; type=command-response; flags=continuation)
344 344 s> 8\r\n
345 345 s> \x00\x00\x00\x01\x00\x02\x002
346 346 s> \r\n
347 347 s> 0\r\n
348 348 s> \r\n
349 349 received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
350 350 response: [
351 351 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
352 352 ]
353 353
354 354 $ killdaemons.py
355 355
356 356 HTTP client follows HTTP redirect on handshake to new repo
357 357
358 358 $ cd $TESTTMP
359 359
360 360 $ hg init redirector
361 361 $ hg init redirected
362 362 $ cd redirected
363 363 $ touch foo
364 364 $ hg -q commit -A -m initial
365 365 $ cd ..
366 366
367 367 $ cat > paths.conf << EOF
368 368 > [paths]
369 369 > / = $TESTTMP/*
370 370 > EOF
371 371
372 372 $ cat > redirectext.py << EOF
373 373 > from mercurial import extensions, wireprotoserver
374 374 > def wrappedcallhttp(orig, repo, req, res, proto, cmd):
375 375 > path = req.advertisedurl[len(req.advertisedbaseurl):]
376 376 > if not path.startswith(b'/redirector'):
377 377 > return orig(repo, req, res, proto, cmd)
378 378 > relpath = path[len(b'/redirector'):]
379 379 > res.status = b'301 Redirect'
380 380 > newurl = b'%s/redirected%s' % (req.baseurl, relpath)
381 381 > if not repo.ui.configbool('testing', 'redirectqs', True) and b'?' in newurl:
382 382 > newurl = newurl[0:newurl.index(b'?')]
383 383 > res.headers[b'Location'] = newurl
384 384 > res.headers[b'Content-Type'] = b'text/plain'
385 385 > res.setbodybytes(b'redirected')
386 386 > return True
387 387 >
388 388 > extensions.wrapfunction(wireprotoserver, '_callhttp', wrappedcallhttp)
389 389 > EOF
390 390
391 391 $ hg --config extensions.redirect=$TESTTMP/redirectext.py \
392 392 > --config server.compressionengines=zlib \
393 393 > serve --web-conf paths.conf --pid-file hg.pid -p $HGPORT -d
394 394 $ cat hg.pid > $DAEMON_PIDS
395 395
396 396 Verify our HTTP 301 is served properly
397 397
398 398 $ hg --verbose debugwireproto --peer raw http://$LOCALIP:$HGPORT << EOF
399 399 > httprequest GET /redirector?cmd=capabilities
400 400 > user-agent: test
401 401 > EOF
402 402 using raw connection to peer
403 403 s> GET /redirector?cmd=capabilities HTTP/1.1\r\n
404 404 s> Accept-Encoding: identity\r\n
405 405 s> user-agent: test\r\n
406 406 s> host: $LOCALIP:$HGPORT\r\n (glob)
407 407 s> \r\n
408 408 s> makefile('rb', None)
409 409 s> HTTP/1.1 301 Redirect\r\n
410 410 s> Server: testing stub value\r\n
411 411 s> Date: $HTTP_DATE$\r\n
412 412 s> Location: http://$LOCALIP:$HGPORT/redirected?cmd=capabilities\r\n (glob)
413 413 s> Content-Type: text/plain\r\n
414 414 s> Content-Length: 10\r\n
415 415 s> \r\n
416 416 s> redirected
417 417 s> GET /redirected?cmd=capabilities HTTP/1.1\r\n
418 418 s> Accept-Encoding: identity\r\n
419 419 s> user-agent: test\r\n
420 420 s> host: $LOCALIP:$HGPORT\r\n (glob)
421 421 s> \r\n
422 422 s> makefile('rb', None)
423 423 s> HTTP/1.1 200 Script output follows\r\n
424 424 s> Server: testing stub value\r\n
425 425 s> Date: $HTTP_DATE$\r\n
426 426 s> Content-Type: application/mercurial-0.1\r\n
427 427 s> Content-Length: 453\r\n
428 428 s> \r\n
429 429 s> batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
430 430
431 431 Test with the HTTP peer
432 432
433 433 $ hg --verbose debugwireproto http://$LOCALIP:$HGPORT/redirector << EOF
434 434 > command heads
435 435 > EOF
436 436 s> GET /redirector?cmd=capabilities HTTP/1.1\r\n
437 437 s> Accept-Encoding: identity\r\n
438 438 s> accept: application/mercurial-0.1\r\n
439 439 s> host: $LOCALIP:$HGPORT\r\n (glob)
440 440 s> user-agent: Mercurial debugwireproto\r\n
441 441 s> \r\n
442 442 s> makefile('rb', None)
443 443 s> HTTP/1.1 301 Redirect\r\n
444 444 s> Server: testing stub value\r\n
445 445 s> Date: $HTTP_DATE$\r\n
446 446 s> Location: http://$LOCALIP:$HGPORT/redirected?cmd=capabilities\r\n (glob)
447 447 s> Content-Type: text/plain\r\n
448 448 s> Content-Length: 10\r\n
449 449 s> \r\n
450 450 s> redirected
451 451 s> GET /redirected?cmd=capabilities HTTP/1.1\r\n
452 452 s> Accept-Encoding: identity\r\n
453 453 s> accept: application/mercurial-0.1\r\n
454 454 s> host: $LOCALIP:$HGPORT\r\n (glob)
455 455 s> user-agent: Mercurial debugwireproto\r\n
456 456 s> \r\n
457 457 s> makefile('rb', None)
458 458 s> HTTP/1.1 200 Script output follows\r\n
459 459 s> Server: testing stub value\r\n
460 460 s> Date: $HTTP_DATE$\r\n
461 461 s> Content-Type: application/mercurial-0.1\r\n
462 462 s> Content-Length: 453\r\n
463 463 s> \r\n
464 464 real URL is http://$LOCALIP:$HGPORT/redirected (glob)
465 465 s> batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
466 466 sending heads command
467 467 s> GET /redirected?cmd=heads HTTP/1.1\r\n
468 468 s> Accept-Encoding: identity\r\n
469 469 s> vary: X-HgProto-1\r\n
470 470 s> x-hgproto-1: 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull\r\n
471 471 s> accept: application/mercurial-0.1\r\n
472 472 s> host: $LOCALIP:$HGPORT\r\n (glob)
473 473 s> user-agent: Mercurial debugwireproto\r\n
474 474 s> \r\n
475 475 s> makefile('rb', None)
476 476 s> HTTP/1.1 200 Script output follows\r\n
477 477 s> Server: testing stub value\r\n
478 478 s> Date: $HTTP_DATE$\r\n
479 479 s> Content-Type: application/mercurial-0.1\r\n
480 480 s> Content-Length: 41\r\n
481 481 s> \r\n
482 482 s> 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n
483 483 response: [
484 484 b'\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL'
485 485 ]
486 486
487 487 $ killdaemons.py
488 488
489 489 Now test a variation where we strip the query string from the redirect URL.
490 490 (SCM Manager apparently did this and clients would recover from it)
491 491
492 492 $ hg --config extensions.redirect=$TESTTMP/redirectext.py \
493 493 > --config server.compressionengines=zlib \
494 494 > --config testing.redirectqs=false \
495 495 > serve --web-conf paths.conf --pid-file hg.pid -p $HGPORT -d
496 496 $ cat hg.pid > $DAEMON_PIDS
497 497
498 498 $ hg --verbose debugwireproto --peer raw http://$LOCALIP:$HGPORT << EOF
499 499 > httprequest GET /redirector?cmd=capabilities
500 500 > user-agent: test
501 501 > EOF
502 502 using raw connection to peer
503 503 s> GET /redirector?cmd=capabilities HTTP/1.1\r\n
504 504 s> Accept-Encoding: identity\r\n
505 505 s> user-agent: test\r\n
506 506 s> host: $LOCALIP:$HGPORT\r\n (glob)
507 507 s> \r\n
508 508 s> makefile('rb', None)
509 509 s> HTTP/1.1 301 Redirect\r\n
510 510 s> Server: testing stub value\r\n
511 511 s> Date: $HTTP_DATE$\r\n
512 512 s> Location: http://$LOCALIP:$HGPORT/redirected\r\n (glob)
513 513 s> Content-Type: text/plain\r\n
514 514 s> Content-Length: 10\r\n
515 515 s> \r\n
516 516 s> redirected
517 517 s> GET /redirected HTTP/1.1\r\n
518 518 s> Accept-Encoding: identity\r\n
519 519 s> user-agent: test\r\n
520 520 s> host: $LOCALIP:$HGPORT\r\n (glob)
521 521 s> \r\n
522 522 s> makefile('rb', None)
523 523 s> HTTP/1.1 200 Script output follows\r\n
524 524 s> Server: testing stub value\r\n
525 525 s> Date: $HTTP_DATE$\r\n
526 526 s> ETag: W/"*"\r\n (glob)
527 527 s> Content-Type: text/html; charset=ascii\r\n
528 528 s> Transfer-Encoding: chunked\r\n
529 529 s> \r\n
530 530 s> 414\r\n
531 531 s> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n
532 532 s> <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en-US">\n
533 533 s> <head>\n
534 534 s> <link rel="icon" href="/redirected/static/hgicon.png" type="image/png" />\n
535 535 s> <meta name="robots" content="index, nofollow" />\n
536 536 s> <link rel="stylesheet" href="/redirected/static/style-paper.css" type="text/css" />\n
537 537 s> <script type="text/javascript" src="/redirected/static/mercurial.js"></script>\n
538 538 s> \n
539 539 s> <title>redirected: log</title>\n
540 540 s> <link rel="alternate" type="application/atom+xml"\n
541 541 s> href="/redirected/atom-log" title="Atom feed for redirected" />\n
542 542 s> <link rel="alternate" type="application/rss+xml"\n
543 543 s> href="/redirected/rss-log" title="RSS feed for redirected" />\n
544 544 s> </head>\n
545 545 s> <body>\n
546 546 s> \n
547 547 s> <div class="container">\n
548 548 s> <div class="menu">\n
549 549 s> <div class="logo">\n
550 550 s> <a href="https://mercurial-scm.org/">\n
551 551 s> <img src="/redirected/static/hglogo.png" alt="mercurial" /></a>\n
552 552 s> </div>\n
553 553 s> <ul>\n
554 554 s> <li class="active">log</li>\n
555 555 s> <li><a href="/redirected/graph/tip">graph</a></li>\n
556 556 s> <li><a href="/redirected/tags">tags</a></li>\n
557 557 s> <li><a href="
558 558 s> \r\n
559 559 s> 810\r\n
560 560 s> /redirected/bookmarks">bookmarks</a></li>\n
561 561 s> <li><a href="/redirected/branches">branches</a></li>\n
562 562 s> </ul>\n
563 563 s> <ul>\n
564 564 s> <li><a href="/redirected/rev/tip">changeset</a></li>\n
565 565 s> <li><a href="/redirected/file/tip">browse</a></li>\n
566 566 s> </ul>\n
567 567 s> <ul>\n
568 568 s> \n
569 569 s> </ul>\n
570 570 s> <ul>\n
571 571 s> <li><a href="/redirected/help">help</a></li>\n
572 572 s> </ul>\n
573 573 s> <div class="atom-logo">\n
574 574 s> <a href="/redirected/atom-log" title="subscribe to atom feed">\n
575 575 s> <img class="atom-logo" src="/redirected/static/feed-icon-14x14.png" alt="atom feed" />\n
576 576 s> </a>\n
577 577 s> </div>\n
578 578 s> </div>\n
579 579 s> \n
580 580 s> <div class="main">\n
581 581 s> <h2 class="breadcrumb"><a href="/">Mercurial</a> &gt; <a href="/redirected">redirected</a> </h2>\n
582 582 s> <h3>log</h3>\n
583 583 s> \n
584 584 s> \n
585 585 s> <form class="search" action="/redirected/log">\n
586 586 s> \n
587 587 s> <p><input name="rev" id="search1" type="text" size="30" value="" /></p>\n
588 588 s> <div id="hint">Find changesets by keywords (author, files, the commit message), revision\n
589 589 s> number or hash, or <a href="/redirected/help/revsets">revset expression</a>.</div>\n
590 590 s> </form>\n
591 591 s> \n
592 592 s> <div class="navigate">\n
593 593 s> <a href="/redirected/shortlog/tip?revcount=30">less</a>\n
594 594 s> <a href="/redirected/shortlog/tip?revcount=120">more</a>\n
595 595 s> | rev 0: <a href="/redirected/shortlog/96ee1d7354c4">(0)</a> <a href="/redirected/shortlog/tip">tip</a> \n
596 596 s> </div>\n
597 597 s> \n
598 598 s> <table class="bigtable">\n
599 599 s> <thead>\n
600 600 s> <tr>\n
601 601 s> <th class="age">age</th>\n
602 602 s> <th class="author">author</th>\n
603 603 s> <th class="description">description</th>\n
604 604 s> </tr>\n
605 605 s> </thead>\n
606 606 s> <tbody class="stripes2">\n
607 607 s> <tr>\n
608 608 s> <td class="age">Thu, 01 Jan 1970 00:00:00 +0000</td>\n
609 609 s> <td class="author">test</td>\n
610 610 s> <td class="description">\n
611 611 s> <a href="/redirected/rev/96ee1d7354c4">initial</a>\n
612 612 s> <span class="phase">draft</span> <span class="branchhead">default</span> <span class="tag">tip</span> \n
613 613 s> </td>\n
614 614 s> </tr>\n
615 615 s> \n
616 616 s> </tbody>\n
617 617 s> </table>\n
618 618 s> \n
619 619 s> <div class="navigate">\n
620 620 s> <a href="/redirected/shortlog/tip?revcount=30">less</a>\n
621 621 s> <a href="/redirected/shortlog/tip?revcount=120">more</a>\n
622 622 s> | rev 0: <a href="/redirected/shortlog/96ee1d7354c4">(0)</a> <a href="/redirected/shortlog/tip">tip</a> \n
623 623 s> </div>\n
624 624 s> \n
625 625 s> <script type="text/javascript">\n
626 626 s> ajaxScrollInit(\n
627 627 s> \'/redirected/shortlog/%next%\',\n
628 628 s> \'\', <!-- NEXTHASH\n
629 629 s> function (htmlText) {
630 630 s> \r\n
631 631 s> 14a\r\n
632 632 s> \n
633 633 s> var m = htmlText.match(/\'(\\w+)\', <!-- NEXTHASH/);\n
634 634 s> return m ? m[1] : null;\n
635 635 s> },\n
636 636 s> \'.bigtable > tbody\',\n
637 637 s> \'<tr class="%class%">\\\n
638 638 s> <td colspan="3" style="text-align: center;">%text%</td>\\\n
639 639 s> </tr>\'\n
640 640 s> );\n
641 641 s> </script>\n
642 642 s> \n
643 643 s> </div>\n
644 644 s> </div>\n
645 645 s> \n
646 646 s> \n
647 647 s> \n
648 648 s> </body>\n
649 649 s> </html>\n
650 650 s> \n
651 651 s> \r\n
652 652 s> 0\r\n
653 653 s> \r\n
654 654
655 655 $ hg --verbose debugwireproto http://$LOCALIP:$HGPORT/redirector << EOF
656 656 > command heads
657 657 > EOF
658 658 s> GET /redirector?cmd=capabilities HTTP/1.1\r\n
659 659 s> Accept-Encoding: identity\r\n
660 660 s> accept: application/mercurial-0.1\r\n
661 661 s> host: $LOCALIP:$HGPORT\r\n (glob)
662 662 s> user-agent: Mercurial debugwireproto\r\n
663 663 s> \r\n
664 664 s> makefile('rb', None)
665 665 s> HTTP/1.1 301 Redirect\r\n
666 666 s> Server: testing stub value\r\n
667 667 s> Date: $HTTP_DATE$\r\n
668 668 s> Location: http://$LOCALIP:$HGPORT/redirected\r\n (glob)
669 669 s> Content-Type: text/plain\r\n
670 670 s> Content-Length: 10\r\n
671 671 s> \r\n
672 672 s> redirected
673 673 s> GET /redirected HTTP/1.1\r\n
674 674 s> Accept-Encoding: identity\r\n
675 675 s> accept: application/mercurial-0.1\r\n
676 676 s> host: $LOCALIP:$HGPORT\r\n (glob)
677 677 s> user-agent: Mercurial debugwireproto\r\n
678 678 s> \r\n
679 679 s> makefile('rb', None)
680 680 s> HTTP/1.1 200 Script output follows\r\n
681 681 s> Server: testing stub value\r\n
682 682 s> Date: $HTTP_DATE$\r\n
683 683 s> ETag: W/"*"\r\n (glob)
684 684 s> Content-Type: text/html; charset=ascii\r\n
685 685 s> Transfer-Encoding: chunked\r\n
686 686 s> \r\n
687 687 real URL is http://$LOCALIP:$HGPORT/redirected (glob)
688 688 s> 414\r\n
689 689 s> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n
690 690 s> <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en-US">\n
691 691 s> <head>\n
692 692 s> <link rel="icon" href="/redirected/static/hgicon.png" type="image/png" />\n
693 693 s> <meta name="robots" content="index, nofollow" />\n
694 694 s> <link rel="stylesheet" href="/redirected/static/style-paper.css" type="text/css" />\n
695 695 s> <script type="text/javascript" src="/redirected/static/mercurial.js"></script>\n
696 696 s> \n
697 697 s> <title>redirected: log</title>\n
698 698 s> <link rel="alternate" type="application/atom+xml"\n
699 699 s> href="/redirected/atom-log" title="Atom feed for redirected" />\n
700 700 s> <link rel="alternate" type="application/rss+xml"\n
701 701 s> href="/redirected/rss-log" title="RSS feed for redirected" />\n
702 702 s> </head>\n
703 703 s> <body>\n
704 704 s> \n
705 705 s> <div class="container">\n
706 706 s> <div class="menu">\n
707 707 s> <div class="logo">\n
708 708 s> <a href="https://mercurial-scm.org/">\n
709 709 s> <img src="/redirected/static/hglogo.png" alt="mercurial" /></a>\n
710 710 s> </div>\n
711 711 s> <ul>\n
712 712 s> <li class="active">log</li>\n
713 713 s> <li><a href="/redirected/graph/tip">graph</a></li>\n
714 714 s> <li><a href="/redirected/tags">tags</a
715 715 s> GET /redirected?cmd=capabilities HTTP/1.1\r\n
716 716 s> Accept-Encoding: identity\r\n
717 717 s> accept: application/mercurial-0.1\r\n
718 718 s> host: $LOCALIP:$HGPORT\r\n (glob)
719 719 s> user-agent: Mercurial debugwireproto\r\n
720 720 s> \r\n
721 721 s> makefile('rb', None)
722 722 s> HTTP/1.1 200 Script output follows\r\n
723 723 s> Server: testing stub value\r\n
724 724 s> Date: $HTTP_DATE$\r\n
725 725 s> Content-Type: application/mercurial-0.1\r\n
726 726 s> Content-Length: 453\r\n
727 727 s> \r\n
728 728 real URL is http://$LOCALIP:$HGPORT/redirected (glob)
729 729 s> batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
730 730 sending heads command
731 731 s> GET /redirected?cmd=heads HTTP/1.1\r\n
732 732 s> Accept-Encoding: identity\r\n
733 733 s> vary: X-HgProto-1\r\n
734 734 s> x-hgproto-1: 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull\r\n
735 735 s> accept: application/mercurial-0.1\r\n
736 736 s> host: $LOCALIP:$HGPORT\r\n (glob)
737 737 s> user-agent: Mercurial debugwireproto\r\n
738 738 s> \r\n
739 739 s> makefile('rb', None)
740 740 s> HTTP/1.1 200 Script output follows\r\n
741 741 s> Server: testing stub value\r\n
742 742 s> Date: $HTTP_DATE$\r\n
743 743 s> Content-Type: application/mercurial-0.1\r\n
744 744 s> Content-Length: 41\r\n
745 745 s> \r\n
746 746 s> 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n
747 747 response: [
748 748 b'\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL'
749 749 ]
@@ -1,428 +1,472
1 1 #require no-chg
2 2
3 3 $ . $TESTDIR/wireprotohelpers.sh
4 4
5 5 $ hg init server
6 6
7 7 zstd isn't present in plain builds. Make tests easier by removing
8 8 zstd from the equation.
9 9
10 10 $ cat >> server/.hg/hgrc << EOF
11 11 > [server]
12 12 > compressionengines = zlib
13 13 > EOF
14 14
15 15 $ hg -R server serve -p $HGPORT -d --pid-file hg.pid -E error.log
16 16 $ cat hg.pid > $DAEMON_PIDS
17 17
18 18 A normal capabilities request is serviced for version 1
19 19
20 20 $ sendhttpraw << EOF
21 21 > httprequest GET ?cmd=capabilities
22 22 > user-agent: test
23 23 > EOF
24 24 using raw connection to peer
25 25 s> GET /?cmd=capabilities HTTP/1.1\r\n
26 26 s> Accept-Encoding: identity\r\n
27 27 s> user-agent: test\r\n
28 28 s> host: $LOCALIP:$HGPORT\r\n (glob)
29 29 s> \r\n
30 30 s> makefile('rb', None)
31 31 s> HTTP/1.1 200 Script output follows\r\n
32 32 s> Server: testing stub value\r\n
33 33 s> Date: $HTTP_DATE$\r\n
34 34 s> Content-Type: application/mercurial-0.1\r\n
35 35 s> Content-Length: *\r\n (glob)
36 36 s> \r\n
37 37 s> batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
38 38
39 39 A proper request without the API server enabled returns the legacy response
40 40
41 41 $ sendhttpraw << EOF
42 42 > httprequest GET ?cmd=capabilities
43 43 > user-agent: test
44 44 > x-hgupgrade-1: foo
45 45 > x-hgproto-1: cbor
46 46 > EOF
47 47 using raw connection to peer
48 48 s> GET /?cmd=capabilities HTTP/1.1\r\n
49 49 s> Accept-Encoding: identity\r\n
50 50 s> user-agent: test\r\n
51 51 s> x-hgproto-1: cbor\r\n
52 52 s> x-hgupgrade-1: foo\r\n
53 53 s> host: $LOCALIP:$HGPORT\r\n (glob)
54 54 s> \r\n
55 55 s> makefile('rb', None)
56 56 s> HTTP/1.1 200 Script output follows\r\n
57 57 s> Server: testing stub value\r\n
58 58 s> Date: $HTTP_DATE$\r\n
59 59 s> Content-Type: application/mercurial-0.1\r\n
60 60 s> Content-Length: *\r\n (glob)
61 61 s> \r\n
62 62 s> batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
63 63
64 64 Restart with just API server enabled. This enables serving the new format.
65 65
66 66 $ killdaemons.py
67 67 $ cat error.log
68 68
69 69 $ cat >> server/.hg/hgrc << EOF
70 70 > [experimental]
71 71 > web.apiserver = true
72 72 > EOF
73 73
74 74 $ hg -R server serve -p $HGPORT -d --pid-file hg.pid -E error.log
75 75 $ cat hg.pid > $DAEMON_PIDS
76 76
77 77 X-HgUpgrade-<N> without CBOR advertisement uses legacy response
78 78
79 79 $ sendhttpraw << EOF
80 80 > httprequest GET ?cmd=capabilities
81 81 > user-agent: test
82 82 > x-hgupgrade-1: foo bar
83 83 > EOF
84 84 using raw connection to peer
85 85 s> GET /?cmd=capabilities HTTP/1.1\r\n
86 86 s> Accept-Encoding: identity\r\n
87 87 s> user-agent: test\r\n
88 88 s> x-hgupgrade-1: foo bar\r\n
89 89 s> host: $LOCALIP:$HGPORT\r\n (glob)
90 90 s> \r\n
91 91 s> makefile('rb', None)
92 92 s> HTTP/1.1 200 Script output follows\r\n
93 93 s> Server: testing stub value\r\n
94 94 s> Date: $HTTP_DATE$\r\n
95 95 s> Content-Type: application/mercurial-0.1\r\n
96 96 s> Content-Length: *\r\n (glob)
97 97 s> \r\n
98 98 s> batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
99 99
100 100 X-HgUpgrade-<N> without known serialization in X-HgProto-<N> uses legacy response
101 101
102 102 $ sendhttpraw << EOF
103 103 > httprequest GET ?cmd=capabilities
104 104 > user-agent: test
105 105 > x-hgupgrade-1: foo bar
106 106 > x-hgproto-1: some value
107 107 > EOF
108 108 using raw connection to peer
109 109 s> GET /?cmd=capabilities HTTP/1.1\r\n
110 110 s> Accept-Encoding: identity\r\n
111 111 s> user-agent: test\r\n
112 112 s> x-hgproto-1: some value\r\n
113 113 s> x-hgupgrade-1: foo bar\r\n
114 114 s> host: $LOCALIP:$HGPORT\r\n (glob)
115 115 s> \r\n
116 116 s> makefile('rb', None)
117 117 s> HTTP/1.1 200 Script output follows\r\n
118 118 s> Server: testing stub value\r\n
119 119 s> Date: $HTTP_DATE$\r\n
120 120 s> Content-Type: application/mercurial-0.1\r\n
121 121 s> Content-Length: *\r\n (glob)
122 122 s> \r\n
123 123 s> batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
124 124
125 125 X-HgUpgrade-<N> + X-HgProto-<N> headers trigger new response format
126 126
127 127 $ sendhttpraw << EOF
128 128 > httprequest GET ?cmd=capabilities
129 129 > user-agent: test
130 130 > x-hgupgrade-1: foo bar
131 131 > x-hgproto-1: cbor
132 132 > EOF
133 133 using raw connection to peer
134 134 s> GET /?cmd=capabilities HTTP/1.1\r\n
135 135 s> Accept-Encoding: identity\r\n
136 136 s> user-agent: test\r\n
137 137 s> x-hgproto-1: cbor\r\n
138 138 s> x-hgupgrade-1: foo bar\r\n
139 139 s> host: $LOCALIP:$HGPORT\r\n (glob)
140 140 s> \r\n
141 141 s> makefile('rb', None)
142 142 s> HTTP/1.1 200 OK\r\n
143 143 s> Server: testing stub value\r\n
144 144 s> Date: $HTTP_DATE$\r\n
145 145 s> Content-Type: application/mercurial-cbor\r\n
146 146 s> Content-Length: *\r\n (glob)
147 147 s> \r\n
148 148 s> \xa3GapibaseDapi/Dapis\xa0Nv1capabilitiesY\x01\xc5batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
149 149 cbor> {
150 150 b'apibase': b'api/',
151 151 b'apis': {},
152 152 b'v1capabilities': b'batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash'
153 153 }
154 154
155 155 Restart server to enable HTTPv2
156 156
157 157 $ killdaemons.py
158 158 $ enablehttpv2 server
159 159 $ hg -R server serve -p $HGPORT -d --pid-file hg.pid -E error.log
160 160 $ cat hg.pid > $DAEMON_PIDS
161 161
162 162 Only requested API services are returned
163 163
164 164 $ sendhttpraw << EOF
165 165 > httprequest GET ?cmd=capabilities
166 166 > user-agent: test
167 167 > x-hgupgrade-1: foo bar
168 168 > x-hgproto-1: cbor
169 169 > EOF
170 170 using raw connection to peer
171 171 s> GET /?cmd=capabilities HTTP/1.1\r\n
172 172 s> Accept-Encoding: identity\r\n
173 173 s> user-agent: test\r\n
174 174 s> x-hgproto-1: cbor\r\n
175 175 s> x-hgupgrade-1: foo bar\r\n
176 176 s> host: $LOCALIP:$HGPORT\r\n (glob)
177 177 s> \r\n
178 178 s> makefile('rb', None)
179 179 s> HTTP/1.1 200 OK\r\n
180 180 s> Server: testing stub value\r\n
181 181 s> Date: $HTTP_DATE$\r\n
182 182 s> Content-Type: application/mercurial-cbor\r\n
183 183 s> Content-Length: *\r\n (glob)
184 184 s> \r\n
185 185 s> \xa3GapibaseDapi/Dapis\xa0Nv1capabilitiesY\x01\xc5batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
186 186 cbor> {
187 187 b'apibase': b'api/',
188 188 b'apis': {},
189 189 b'v1capabilities': b'batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash'
190 190 }
191 191
192 192 Request for HTTPv2 service returns information about it
193 193
194 194 $ sendhttpraw << EOF
195 195 > httprequest GET ?cmd=capabilities
196 196 > user-agent: test
197 197 > x-hgupgrade-1: exp-http-v2-0001 foo bar
198 198 > x-hgproto-1: cbor
199 199 > EOF
200 200 using raw connection to peer
201 201 s> GET /?cmd=capabilities HTTP/1.1\r\n
202 202 s> Accept-Encoding: identity\r\n
203 203 s> user-agent: test\r\n
204 204 s> x-hgproto-1: cbor\r\n
205 205 s> x-hgupgrade-1: exp-http-v2-0001 foo bar\r\n
206 206 s> host: $LOCALIP:$HGPORT\r\n (glob)
207 207 s> \r\n
208 208 s> makefile('rb', None)
209 209 s> HTTP/1.1 200 OK\r\n
210 210 s> Server: testing stub value\r\n
211 211 s> Date: $HTTP_DATE$\r\n
212 212 s> Content-Type: application/mercurial-cbor\r\n
213 213 s> Content-Length: *\r\n (glob)
214 214 s> \r\n
215 s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0001\xa4Hcommands\xa7Ibranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullEheads\xa2Dargs\xa1Jpubliconly\xf4Kpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\x81HdeadbeefKpermissions\x81DpullHlistkeys\xa2Dargs\xa1InamespaceBnsKpermissions\x81DpullFlookup\xa2Dargs\xa1CkeyCfooKpermissions\x81DpullGpushkey\xa2Dargs\xa4CkeyCkeyInamespaceBnsCnewCnewColdColdKpermissions\x81DpushKcompression\x81\xa1DnameDzlibQframingmediatypes\x81X&application/mercurial-exp-framing-0005Nrawrepoformats\x82LgeneraldeltaHrevlogv1Nv1capabilitiesY\x01\xc5batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
215 s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0001\xa4Hcommands\xa8Ibranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa3Ffields\xd9\x01\x02\x82GparentsHrevisionInoderange\x82\x81J0123456...\x81Iabcdef...Enodes\x81J0123456...Kpermissions\x81DpullEheads\xa2Dargs\xa1Jpubliconly\xf4Kpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\x81HdeadbeefKpermissions\x81DpullHlistkeys\xa2Dargs\xa1InamespaceBnsKpermissions\x81DpullFlookup\xa2Dargs\xa1CkeyCfooKpermissions\x81DpullGpushkey\xa2Dargs\xa4CkeyCkeyInamespaceBnsCnewCnewColdColdKpermissions\x81DpushKcompression\x81\xa1DnameDzlibQframingmediatypes\x81X&application/mercurial-exp-framing-0005Nrawrepoformats\x82LgeneraldeltaHrevlogv1Nv1capabilitiesY\x01\xc5batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
216 216 cbor> {
217 217 b'apibase': b'api/',
218 218 b'apis': {
219 219 b'exp-http-v2-0001': {
220 220 b'commands': {
221 221 b'branchmap': {
222 222 b'args': {},
223 223 b'permissions': [
224 224 b'pull'
225 225 ]
226 226 },
227 227 b'capabilities': {
228 228 b'args': {},
229 229 b'permissions': [
230 230 b'pull'
231 231 ]
232 232 },
233 b'changesetdata': {
234 b'args': {
235 b'fields': set([
236 b'parents',
237 b'revision'
238 ]),
239 b'noderange': [
240 [
241 b'0123456...'
242 ],
243 [
244 b'abcdef...'
245 ]
246 ],
247 b'nodes': [
248 b'0123456...'
249 ]
250 },
251 b'permissions': [
252 b'pull'
253 ]
254 },
233 255 b'heads': {
234 256 b'args': {
235 257 b'publiconly': False
236 258 },
237 259 b'permissions': [
238 260 b'pull'
239 261 ]
240 262 },
241 263 b'known': {
242 264 b'args': {
243 265 b'nodes': [
244 266 b'deadbeef'
245 267 ]
246 268 },
247 269 b'permissions': [
248 270 b'pull'
249 271 ]
250 272 },
251 273 b'listkeys': {
252 274 b'args': {
253 275 b'namespace': b'ns'
254 276 },
255 277 b'permissions': [
256 278 b'pull'
257 279 ]
258 280 },
259 281 b'lookup': {
260 282 b'args': {
261 283 b'key': b'foo'
262 284 },
263 285 b'permissions': [
264 286 b'pull'
265 287 ]
266 288 },
267 289 b'pushkey': {
268 290 b'args': {
269 291 b'key': b'key',
270 292 b'namespace': b'ns',
271 293 b'new': b'new',
272 294 b'old': b'old'
273 295 },
274 296 b'permissions': [
275 297 b'push'
276 298 ]
277 299 }
278 300 },
279 301 b'compression': [
280 302 {
281 303 b'name': b'zlib'
282 304 }
283 305 ],
284 306 b'framingmediatypes': [
285 307 b'application/mercurial-exp-framing-0005'
286 308 ],
287 309 b'rawrepoformats': [
288 310 b'generaldelta',
289 311 b'revlogv1'
290 312 ]
291 313 }
292 314 },
293 315 b'v1capabilities': b'batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash'
294 316 }
295 317
296 318 capabilities command returns expected info
297 319
298 320 $ sendhttpv2peerhandshake << EOF
299 321 > command capabilities
300 322 > EOF
301 323 creating http peer for wire protocol version 2
302 324 s> GET /?cmd=capabilities HTTP/1.1\r\n
303 325 s> Accept-Encoding: identity\r\n
304 326 s> vary: X-HgProto-1,X-HgUpgrade-1\r\n
305 327 s> x-hgproto-1: cbor\r\n
306 328 s> x-hgupgrade-1: exp-http-v2-0001\r\n
307 329 s> accept: application/mercurial-0.1\r\n
308 330 s> host: $LOCALIP:$HGPORT\r\n (glob)
309 331 s> user-agent: Mercurial debugwireproto\r\n
310 332 s> \r\n
311 333 s> makefile('rb', None)
312 334 s> HTTP/1.1 200 OK\r\n
313 335 s> Server: testing stub value\r\n
314 336 s> Date: $HTTP_DATE$\r\n
315 337 s> Content-Type: application/mercurial-cbor\r\n
316 338 s> Content-Length: *\r\n (glob)
317 339 s> \r\n
318 s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0001\xa4Hcommands\xa7Ibranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullEheads\xa2Dargs\xa1Jpubliconly\xf4Kpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\x81HdeadbeefKpermissions\x81DpullHlistkeys\xa2Dargs\xa1InamespaceBnsKpermissions\x81DpullFlookup\xa2Dargs\xa1CkeyCfooKpermissions\x81DpullGpushkey\xa2Dargs\xa4CkeyCkeyInamespaceBnsCnewCnewColdColdKpermissions\x81DpushKcompression\x81\xa1DnameDzlibQframingmediatypes\x81X&application/mercurial-exp-framing-0005Nrawrepoformats\x82LgeneraldeltaHrevlogv1Nv1capabilitiesY\x01\xc5batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
340 s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0001\xa4Hcommands\xa8Ibranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa3Ffields\xd9\x01\x02\x82GparentsHrevisionInoderange\x82\x81J0123456...\x81Iabcdef...Enodes\x81J0123456...Kpermissions\x81DpullEheads\xa2Dargs\xa1Jpubliconly\xf4Kpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\x81HdeadbeefKpermissions\x81DpullHlistkeys\xa2Dargs\xa1InamespaceBnsKpermissions\x81DpullFlookup\xa2Dargs\xa1CkeyCfooKpermissions\x81DpullGpushkey\xa2Dargs\xa4CkeyCkeyInamespaceBnsCnewCnewColdColdKpermissions\x81DpushKcompression\x81\xa1DnameDzlibQframingmediatypes\x81X&application/mercurial-exp-framing-0005Nrawrepoformats\x82LgeneraldeltaHrevlogv1Nv1capabilitiesY\x01\xc5batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
319 341 sending capabilities command
320 342 s> POST /api/exp-http-v2-0001/ro/capabilities HTTP/1.1\r\n
321 343 s> Accept-Encoding: identity\r\n
322 s> *\r\n (glob)
344 s> accept: application/mercurial-exp-framing-0005\r\n
323 345 s> content-type: application/mercurial-exp-framing-0005\r\n
324 346 s> content-length: 27\r\n
325 347 s> host: $LOCALIP:$HGPORT\r\n (glob)
326 348 s> user-agent: Mercurial debugwireproto\r\n
327 349 s> \r\n
328 350 s> \x13\x00\x00\x01\x00\x01\x01\x11\xa1DnameLcapabilities
329 351 s> makefile('rb', None)
330 352 s> HTTP/1.1 200 OK\r\n
331 353 s> Server: testing stub value\r\n
332 354 s> Date: $HTTP_DATE$\r\n
333 355 s> Content-Type: application/mercurial-exp-framing-0005\r\n
334 356 s> Transfer-Encoding: chunked\r\n
335 357 s> \r\n
336 358 s> 13\r\n
337 359 s> \x0b\x00\x00\x01\x00\x02\x011
338 360 s> \xa1FstatusBok
339 361 s> \r\n
340 362 received frame(size=11; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=continuation)
341 s> 1cc\r\n
342 s> \xc4\x01\x00\x01\x00\x02\x001
343 s> \xa4Hcommands\xa7Ibranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullEheads\xa2Dargs\xa1Jpubliconly\xf4Kpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\x81HdeadbeefKpermissions\x81DpullHlistkeys\xa2Dargs\xa1InamespaceBnsKpermissions\x81DpullFlookup\xa2Dargs\xa1CkeyCfooKpermissions\x81DpullGpushkey\xa2Dargs\xa4CkeyCkeyInamespaceBnsCnewCnewColdColdKpermissions\x81DpushKcompression\x81\xa1DnameDzlibQframingmediatypes\x81X&application/mercurial-exp-framing-0005Nrawrepoformats\x82LgeneraldeltaHrevlogv1
363 s> 243\r\n
364 s> ;\x02\x00\x01\x00\x02\x001
365 s> \xa4Hcommands\xa8Ibranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa3Ffields\xd9\x01\x02\x82GparentsHrevisionInoderange\x82\x81J0123456...\x81Iabcdef...Enodes\x81J0123456...Kpermissions\x81DpullEheads\xa2Dargs\xa1Jpubliconly\xf4Kpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\x81HdeadbeefKpermissions\x81DpullHlistkeys\xa2Dargs\xa1InamespaceBnsKpermissions\x81DpullFlookup\xa2Dargs\xa1CkeyCfooKpermissions\x81DpullGpushkey\xa2Dargs\xa4CkeyCkeyInamespaceBnsCnewCnewColdColdKpermissions\x81DpushKcompression\x81\xa1DnameDzlibQframingmediatypes\x81X&application/mercurial-exp-framing-0005Nrawrepoformats\x82LgeneraldeltaHrevlogv1
344 366 s> \r\n
345 received frame(size=452; request=1; stream=2; streamflags=; type=command-response; flags=continuation)
367 received frame(size=571; request=1; stream=2; streamflags=; type=command-response; flags=continuation)
346 368 s> 8\r\n
347 369 s> \x00\x00\x00\x01\x00\x02\x002
348 370 s> \r\n
349 371 s> 0\r\n
350 372 s> \r\n
351 373 received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
352 374 response: gen[
353 375 {
354 376 b'commands': {
355 377 b'branchmap': {
356 378 b'args': {},
357 379 b'permissions': [
358 380 b'pull'
359 381 ]
360 382 },
361 383 b'capabilities': {
362 384 b'args': {},
363 385 b'permissions': [
364 386 b'pull'
365 387 ]
366 388 },
389 b'changesetdata': {
390 b'args': {
391 b'fields': set([
392 b'parents',
393 b'revision'
394 ]),
395 b'noderange': [
396 [
397 b'0123456...'
398 ],
399 [
400 b'abcdef...'
401 ]
402 ],
403 b'nodes': [
404 b'0123456...'
405 ]
406 },
407 b'permissions': [
408 b'pull'
409 ]
410 },
367 411 b'heads': {
368 412 b'args': {
369 413 b'publiconly': False
370 414 },
371 415 b'permissions': [
372 416 b'pull'
373 417 ]
374 418 },
375 419 b'known': {
376 420 b'args': {
377 421 b'nodes': [
378 422 b'deadbeef'
379 423 ]
380 424 },
381 425 b'permissions': [
382 426 b'pull'
383 427 ]
384 428 },
385 429 b'listkeys': {
386 430 b'args': {
387 431 b'namespace': b'ns'
388 432 },
389 433 b'permissions': [
390 434 b'pull'
391 435 ]
392 436 },
393 437 b'lookup': {
394 438 b'args': {
395 439 b'key': b'foo'
396 440 },
397 441 b'permissions': [
398 442 b'pull'
399 443 ]
400 444 },
401 445 b'pushkey': {
402 446 b'args': {
403 447 b'key': b'key',
404 448 b'namespace': b'ns',
405 449 b'new': b'new',
406 450 b'old': b'old'
407 451 },
408 452 b'permissions': [
409 453 b'push'
410 454 ]
411 455 }
412 456 },
413 457 b'compression': [
414 458 {
415 459 b'name': b'zlib'
416 460 }
417 461 ],
418 462 b'framingmediatypes': [
419 463 b'application/mercurial-exp-framing-0005'
420 464 ],
421 465 b'rawrepoformats': [
422 466 b'generaldelta',
423 467 b'revlogv1'
424 468 ]
425 469 }
426 470 ]
427 471
428 472 $ cat error.log
General Comments 0
You need to be logged in to leave comments. Login now